1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2002 Netfilter core team <coreteam@netfilter.org>
6  *
7  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
8  * 	- increase module usage count as soon as we have rules inside
9  * 	  a table
10  */
11 #include <linux/config.h>
12 #include <linux/cache.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/tcp.h>
19 #include <linux/udp.h>
20 #include <linux/icmp.h>
21 #include <net/ip.h>
22 #include <asm/uaccess.h>
23 #include <asm/semaphore.h>
24 #include <linux/proc_fs.h>
25 
26 #include <linux/netfilter_ipv4/ip_tables.h>
27 
28 /*#define DEBUG_IP_FIREWALL*/
29 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
30 /*#define DEBUG_IP_FIREWALL_USER*/
31 
32 #ifdef DEBUG_IP_FIREWALL
33 #define dprintf(format, args...)  printk(format , ## args)
34 #else
35 #define dprintf(format, args...)
36 #endif
37 
38 #ifdef DEBUG_IP_FIREWALL_USER
39 #define duprintf(format, args...) printk(format , ## args)
40 #else
41 #define duprintf(format, args...)
42 #endif
43 
#ifdef CONFIG_NETFILTER_DEBUG
/* Soft assert: logs location but does not stop the kernel. */
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* Round x up to the next SMP cache-line boundary (SMP_CACHE_BYTES is
   assumed to be a power of two). */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

/* Serializes user-context access to the target/match/table lists and
   table replacement. */
static DECLARE_MUTEX(ipt_mutex);

/* Must have mutex */
#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
#include <linux/netfilter_ipv4/lockhelp.h>
#include <linux/netfilter_ipv4/listhelp.h>
63 
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69 
70 /*
71    We keep a set of rules for each CPU, so we can avoid write-locking
72    them in the softirq when updating the counters and therefore
73    only need to read-lock in the softirq; doing a write_lock_bh() in user
74    context stops packets coming through and allows user context to read
75    the counters or update the rules.
76 
77    To be cache friendly on SMP, we arrange them like so:
78    [ n-entries ]
79    ... cache-align padding ...
80    [ n-entries ]
81 
82    Hence the start of any table is given by get_table() below.  */
83 
/* The table itself */
struct ipt_table_info
{
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows: byte offsets into `entries',
	   one per netfilter hook (see get_entry()). */
	unsigned int hook_entry[NF_IP_NUMHOOKS];
	unsigned int underflow[NF_IP_NUMHOOKS];

	/* ipt_entry tables: one per CPU, each copy cache-aligned and
	   SMP_ALIGN(size) bytes apart (see TABLE_OFFSET). */
	char entries[0] ____cacheline_aligned;
};
101 
/* Registered targets, matches and tables; protected by ipt_mutex. */
static LIST_HEAD(ipt_target);
static LIST_HEAD(ipt_match);
static LIST_HEAD(ipt_tables);
/* Add b bytes / p packets to counter struct c.
   NOTE: evaluates `c' twice -- don't pass an expression with side
   effects for it. */
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

#ifdef CONFIG_SMP
/* Byte offset of CPU p's private copy of the rules within a table. */
#define TABLE_OFFSET(t,p) (SMP_ALIGN((t)->size)*(p))
#else
#define TABLE_OFFSET(t,p) 0
#endif
112 
113 #if 0
114 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
115 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
116 #define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
117 #endif
118 
/* Returns whether matches rule or not.
 *
 * Checks the IP-header part of a rule (addresses, interfaces,
 * protocol, fragment flag) against the packet.  `isfrag' is non-zero
 * for non-first fragments, which can never match port/flag-style
 * information. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

	/* Evaluate `bool', inverting the result when the corresponding
	   IPT_INV_* bit is set in the rule's invflags. */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; this should unroll nicely.
	   Compares the IFNAMSIZ-byte name one unsigned long at a time
	   under iniface_mask, so "eth+"-style wildcards work; requires
	   long-aligned buffers (cf. nulldevname in ipt_do_table). */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	/* Same masked word-wise comparison for the output interface. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
197 
198 static inline int
ip_checkentry(const struct ipt_ip * ip)199 ip_checkentry(const struct ipt_ip *ip)
200 {
201 	if (ip->flags & ~IPT_F_MASK) {
202 		duprintf("Unknown flag bits set: %08X\n",
203 			 ip->flags & ~IPT_F_MASK);
204 		return 0;
205 	}
206 	if (ip->invflags & ~IPT_INV_MASK) {
207 		duprintf("Unknown invflag bits set: %08X\n",
208 			 ip->invflags & ~IPT_INV_MASK);
209 		return 0;
210 	}
211 	return 1;
212 }
213 
/* Target function of the built-in ERROR target: should never be hit
   by a real packet.  Logs the error string carried in targinfo
   (rate-limited) and drops the packet. */
static unsigned int
ipt_error(struct sk_buff **pskb,
	  unsigned int hooknum,
	  const struct net_device *in,
	  const struct net_device *out,
	  const void *targinfo,
	  void *userinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
227 
228 static inline
do_match(struct ipt_entry_match * m,const struct sk_buff * skb,const struct net_device * in,const struct net_device * out,int offset,const void * hdr,u_int16_t datalen,int * hotdrop)229 int do_match(struct ipt_entry_match *m,
230 	     const struct sk_buff *skb,
231 	     const struct net_device *in,
232 	     const struct net_device *out,
233 	     int offset,
234 	     const void *hdr,
235 	     u_int16_t datalen,
236 	     int *hotdrop)
237 {
238 	/* Stop iteration if it doesn't match */
239 	if (!m->u.kernel.match->match(skb, in, out, m->data,
240 				      offset, hdr, datalen, hotdrop))
241 		return 1;
242 	else
243 		return 0;
244 }
245 
/* Translate a byte offset within a table's rule blob into an entry
   pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	char *p = base;

	return (struct ipt_entry *)(p + offset);
}
251 
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 *
 * Main packet-traversal routine: walks this CPU's copy of `table'
 * starting at the entry point for `hook', evaluating rules until an
 * absolute verdict is produced.  User-defined chains are followed via
 * jumps; returns from them use a back-pointer chain threaded through
 * the entries' `comefrom' fields.  Runs under the table's read lock
 * with bottom halves disabled. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table,
	     void *userdata)
{
	/* Long-aligned so ip_packet_match can compare it word-wise. */
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))) = { 0 };
	u_int16_t offset;
	struct iphdr *ip;
	void *protohdr;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	protohdr = (u_int32_t *)ip + ip->ihl;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	/* Each CPU traverses its own copy of the rules so counters can
	   be bumped without write-locking (see layout comment above). */
	table_base = (void *)table->private->entries
		+ TABLE_OFFSET(table->private,
			       cpu_number_map(smp_processor_id()));
	e = get_entry(table_base, table->private->hook_entry[hook]);

#ifdef CONFIG_NETFILTER_DEBUG
	/* Check noone else using our table */
	if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
	    && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
		printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
		       smp_processor_id(),
		       table->name,
		       &((struct ipt_entry *)table_base)->comefrom,
		       ((struct ipt_entry *)table_base)->comefrom);
	}
	((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
#endif

	/* For return from builtin chain */
	back = get_entry(table_base, table->private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		(*pskb)->nfcache |= e->nfcache;
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			/* Non-zero means one of the rule's matches
			   said "no". */
			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, protohdr,
					      datalen, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						/* Absolute verdicts are
						   encoded as -verdict - 1. */
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				/* A jump (not a plain fallthrough):
				   remember where to return to. */
				if (table_base + v
				    != (void *)e + e->next_offset) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
                                   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     hook,
								     in, out,
								     t->data,
								     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				protohdr = (u_int32_t *)ip + ip->ihl;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
#endif
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
410 
/* If it succeeds, returns element and locks mutex */
/* Search `head' (one of the ipt_target/ipt_match/ipt_tables lists)
   for an entry named `name'.  On success the mutex is left held and
   the caller must up() it; on failure *error is set (-EINTR if the
   down was interrupted, -ENOENT if not found) and NULL is returned
   with the mutex released. */
static inline void *
find_inlist_lock_noload(struct list_head *head,
			const char *name,
			int *error,
			struct semaphore *mutex)
{
	void *ret;

#if 0
	duprintf("find_inlist: searching for `%s' in %s.\n",
		 name, head == &ipt_target ? "ipt_target"
		 : head == &ipt_match ? "ipt_match"
		 : head == &ipt_tables ? "ipt_tables" : "UNKNOWN");
#endif

	*error = down_interruptible(mutex);
	if (*error != 0)
		return NULL;

	ret = list_named_find(head, name);
	if (!ret) {
		*error = -ENOENT;
		up(mutex);
	}
	return ret;
}
438 
#ifndef CONFIG_KMOD
#define find_inlist_lock(h,n,p,e,m) find_inlist_lock_noload((h),(n),(e),(m))
#else
/* As find_inlist_lock_noload(), but on a miss tries to auto-load the
   module "<prefix><name>" via kmod and searches again.  Same locking
   contract: mutex held on success only. */
static void *
find_inlist_lock(struct list_head *head,
		 const char *name,
		 const char *prefix,
		 int *error,
		 struct semaphore *mutex)
{
	void *ret;

	ret = find_inlist_lock_noload(head, name, error, mutex);
	if (!ret) {
		/* Variable-length array sized for prefix + name. */
		char modulename[IPT_FUNCTION_MAXNAMELEN + strlen(prefix) + 1];
		strcpy(modulename, prefix);
		strcat(modulename, name);
		duprintf("find_inlist: loading `%s'.\n", modulename);
		request_module(modulename);
		ret = find_inlist_lock_noload(head, name, error, mutex);
	}

	return ret;
}
#endif
464 
/* Find a registered table by name, auto-loading "iptable_<name>" if
   needed.  Holds ipt_mutex on success. */
static inline struct ipt_table *
ipt_find_table_lock(const char *name, int *error, struct semaphore *mutex)
{
	return find_inlist_lock(&ipt_tables, name, "iptable_", error, mutex);
}
470 
/* Find a registered match by name, auto-loading "ipt_<name>" if
   needed.  Holds ipt_mutex on success. */
static inline struct ipt_match *
find_match_lock(const char *name, int *error, struct semaphore *mutex)
{
	return find_inlist_lock(&ipt_match, name, "ipt_", error, mutex);
}
476 
/* Find a registered target by name, auto-loading "ipt_<name>" if
   needed.  Holds ipt_mutex on success.  Non-static: also used by
   other netfilter modules. */
struct ipt_target *
ipt_find_target_lock(const char *name, int *error, struct semaphore *mutex)
{
	return find_inlist_lock(&ipt_target, name, "ipt_", error, mutex);
}
482 
483 /* All zeroes == unconditional rule. */
484 static inline int
unconditional(const struct ipt_ip * ip)485 unconditional(const struct ipt_ip *ip)
486 {
487 	unsigned int i;
488 
489 	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
490 		if (((__u32 *)ip)[i])
491 			return 0;
492 
493 	return 1;
494 }
495 
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/* Iterative depth-first walk over the rule graph; uses no recursion.
   counters.pcnt temporarily stores the back pointer for each visited
   entry (reset to 0 on the way back out), and bit NF_IP_NUMHOOKS of
   comefrom marks "currently on the walk stack" -- seeing it again
   means a loop. */
static int
mark_source_chains(struct ipt_table_info *newinfo, unsigned int valid_hooks)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(newinfo->entries + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			/* Revisiting an entry still on the walk stack
			   means the ruleset contains a cycle. */
			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Pop: clear the on-stack mark. */
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(newinfo->entries + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(newinfo->entries + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(newinfo->entries + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
591 
/* Undo check_match() for one match: call its destructor and drop the
   module reference.  When `i' is non-NULL it limits how many matches
   are cleaned (counting down); returning 1 stops IPT_MATCH_ITERATE. */
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->data,
					   m->u.match_size - sizeof(*m));

	if (m->u.kernel.match->me)
		__MOD_DEC_USE_COUNT(m->u.kernel.match->me);

	return 0;
}
607 
/* Validate a standard target: correct size, and a verdict that is
   either a legal negative value or a jump offset inside the table
   (max_offset is the total blob size).  Returns 1 if OK. */
static inline int
standard_check(const struct ipt_entry_target *t,
	       unsigned int max_offset)
{
	struct ipt_standard_target *targ = (void *)t;

	/* Check standard info. */
	if (t->u.target_size
	    != IPT_ALIGN(sizeof(struct ipt_standard_target))) {
		duprintf("standard_check: target size %u != %u\n",
			 t->u.target_size,
			 IPT_ALIGN(sizeof(struct ipt_standard_target)));
		return 0;
	}

	/* Non-negative verdict == jump target offset; must leave room
	   for at least one ipt_entry before the end of the blob. */
	if (targ->verdict >= 0
	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
		duprintf("ipt_standard_check: bad verdict (%i)\n",
			 targ->verdict);
		return 0;
	}

	/* Negative verdicts encode NF_* results as -verdict - 1. */
	if (targ->verdict < -NF_MAX_VERDICT - 1) {
		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	return 1;
}
637 
/* Resolve one rule match by name, take a module reference, and run
   its checkentry hook.  On success increments *i (count of
   successfully set-up matches, used for unwinding) and returns 0;
   otherwise returns a negative errno with the reference dropped. */
static inline int
check_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    unsigned int *i)
{
	int ret;
	struct ipt_match *match;

	match = find_match_lock(m->u.user.name, &ret, &ipt_mutex);
	if (!match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return ret;
	}
	/* Pin the module while the mutex (held by find_match_lock on
	   success) still guarantees it can't unregister. */
	if (match->me)
		__MOD_INC_USE_COUNT(match->me);
	m->u.kernel.match = match;
	up(&ipt_mutex);

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, m->data,
					      m->u.match_size - sizeof(*m),
					      hookmask)) {
		if (m->u.kernel.match->me)
			__MOD_DEC_USE_COUNT(m->u.kernel.match->me);
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		return -EINVAL;
	}

	(*i)++;
	return 0;
}
672 
673 static struct ipt_target ipt_standard_target;
674 
675 static inline int
check_entry(struct ipt_entry * e,const char * name,unsigned int size,unsigned int * i)676 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
677 	    unsigned int *i)
678 {
679 	struct ipt_entry_target *t;
680 	struct ipt_target *target;
681 	int ret;
682 	unsigned int j;
683 
684 	if (!ip_checkentry(&e->ip)) {
685 		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
686 		return -EINVAL;
687 	}
688 
689 	j = 0;
690 	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
691 	if (ret != 0)
692 		goto cleanup_matches;
693 
694 	t = ipt_get_target(e);
695 	target = ipt_find_target_lock(t->u.user.name, &ret, &ipt_mutex);
696 	if (!target) {
697 		duprintf("check_entry: `%s' not found\n", t->u.user.name);
698 		goto cleanup_matches;
699 	}
700 	if (target->me)
701 		__MOD_INC_USE_COUNT(target->me);
702 	t->u.kernel.target = target;
703 	up(&ipt_mutex);
704 
705 	if (t->u.kernel.target == &ipt_standard_target) {
706 		if (!standard_check(t, size)) {
707 			ret = -EINVAL;
708 			goto cleanup_matches;
709 		}
710 	} else if (t->u.kernel.target->checkentry
711 		   && !t->u.kernel.target->checkentry(name, e, t->data,
712 						      t->u.target_size
713 						      - sizeof(*t),
714 						      e->comefrom)) {
715 		if (t->u.kernel.target->me)
716 			__MOD_DEC_USE_COUNT(t->u.kernel.target->me);
717 		duprintf("ip_tables: check failed for `%s'.\n",
718 			 t->u.kernel.target->name);
719 		ret = -EINVAL;
720 		goto cleanup_matches;
721 	}
722 
723 	(*i)++;
724 	return 0;
725 
726  cleanup_matches:
727 	IPT_MATCH_ITERATE(e, cleanup_match, &j);
728 	return ret;
729 }
730 
/* First-pass check of a user-supplied entry: alignment, minimum
   size, and position within [base, limit).  Records which entries
   sit exactly at the user-declared hook entry points / underflows,
   and zeroes the kernel-owned counters/comefrom fields.  Increments
   *i (entry count) on success. */
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct ipt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Entry must be big enough for itself plus at least a target. */
	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
           < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
773 
/* Tear down one fully-checked entry: destroy all its matches and its
   target, dropping the module references taken by check_entry().
   When `i' is non-NULL it limits how many entries are cleaned
   (counting down); returning 1 stops IPT_ENTRY_ITERATE. */
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->data,
					    t->u.target_size - sizeof(*t));
	if (t->u.kernel.target->me)
		__MOD_DEC_USE_COUNT(t->u.kernel.target->me);

	return 0;
}
793 
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Validation pipeline: offsets/hooks first, then loop detection via
   mark_source_chains(), then per-entry semantic checks.  On success
   the validated CPU-0 copy is replicated for every other CPU.
   Returns 0 or a negative errno (-ELOOP for rule cycles). */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct ipt_table_info *newinfo,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				newinfo->entries,
				newinfo->entries + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
				check_entry, name, size, &i);

	if (ret != 0) {
		/* Unwind only the i entries check_entry() completed. */
		IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for (i = 1; i < smp_num_cpus; i++) {
		memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
		       newinfo->entries,
		       SMP_ALIGN(newinfo->size));
	}

	return ret;
}
875 
/* Atomically swap a table's rule blob for `newinfo' under the table
   write lock.  Fails with -EAGAIN (via *error, returning NULL) if
   num_counters no longer matches the live entry count, meaning the
   table changed since userspace snapshotted it.  On success returns
   the old info, which the caller must free. */
static struct ipt_table_info *
replace_table(struct ipt_table *table,
	      unsigned int num_counters,
	      struct ipt_table_info *newinfo,
	      int *error)
{
	struct ipt_table_info *oldinfo;

#ifdef CONFIG_NETFILTER_DEBUG
	{
		struct ipt_entry *table_base;
		unsigned int i;

		/* Mark every CPU's copy "not in use" for the comefrom
		   sanity checks in ipt_do_table. */
		for (i = 0; i < smp_num_cpus; i++) {
			table_base =
				(void *)newinfo->entries
				+ TABLE_OFFSET(newinfo, i);

			table_base->comefrom = 0xdead57ac;
		}
	}
#endif

	/* Do the substitution. */
	write_lock_bh(&table->lock);
	/* Check inside lock: is the old number correct? */
	if (num_counters != table->private->number) {
		duprintf("num_counters != table->private->number (%u/%u)\n",
			 num_counters, table->private->number);
		write_unlock_bh(&table->lock);
		*error = -EAGAIN;
		return NULL;
	}
	oldinfo = table->private;
	table->private = newinfo;
	newinfo->initial_entries = oldinfo->initial_entries;
	write_unlock_bh(&table->lock);

	return oldinfo;
}
916 
917 /* Gets counters. */
918 static inline int
add_entry_to_counter(const struct ipt_entry * e,struct ipt_counters total[],unsigned int * i)919 add_entry_to_counter(const struct ipt_entry *e,
920 		     struct ipt_counters total[],
921 		     unsigned int *i)
922 {
923 	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
924 
925 	(*i)++;
926 	return 0;
927 }
928 
/* Sum the per-CPU counter copies of table `t' into `counters'
   (one slot per rule).  Caller must hold the table lock so the
   snapshot is consistent. */
static void
get_counters(const struct ipt_table_info *t,
	     struct ipt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;

	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
		i = 0;
		IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
945 
/* Copy the table's rules out to userspace: the CPU-0 blob verbatim,
   then patch in (a) summed per-CPU counters and (b) the user-visible
   names of each match/target (the kernel copies hold pointers in the
   same union).  Returns 0 or a negative errno. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void *userptr)
{
	unsigned int off, num, countersize;
	struct ipt_entry *e;
	struct ipt_counters *counters;
	int ret = 0;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct ipt_counters) * table->private->number;
	counters = vmalloc(countersize);

	if (counters == NULL)
		return -ENOMEM;

	/* First, sum counters... */
	memset(counters, 0, countersize);
	write_lock_bh(&table->lock);
	get_counters(table->private, counters);
	write_unlock_bh(&table->lock);

	/* ... then copy entire thing from CPU 0... */
	if (copy_to_user(userptr, table->private->entries, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(table->private->entries + off);
		/* Overwrite the raw kernel counters with the summed
		   snapshot for this rule. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's union with its user-visible
		   name (kernel copy holds a struct pointer there). */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Likewise for the target name. */
		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1024 
1025 static int
get_entries(const struct ipt_get_entries * entries,struct ipt_get_entries * uptr)1026 get_entries(const struct ipt_get_entries *entries,
1027 	    struct ipt_get_entries *uptr)
1028 {
1029 	int ret;
1030 	struct ipt_table *t;
1031 
1032 	t = ipt_find_table_lock(entries->name, &ret, &ipt_mutex);
1033 	if (t) {
1034 		duprintf("t->private->number = %u\n",
1035 			 t->private->number);
1036 		if (entries->size == t->private->size)
1037 			ret = copy_entries_to_user(t->private->size,
1038 						   t, uptr->entrytable);
1039 		else {
1040 			duprintf("get_entries: I've got %u not %u!\n",
1041 				 t->private->size,
1042 				 entries->size);
1043 			ret = -EINVAL;
1044 		}
1045 		up(&ipt_mutex);
1046 	} else
1047 		duprintf("get_entries: Can't find %s!\n",
1048 			 entries->name);
1049 
1050 	return ret;
1051 }
1052 
/* IPT_SO_SET_REPLACE: atomically swap a table's entire ruleset for one
 * supplied by userspace, returning the old counters to the caller.
 * `user' points at a struct ipt_replace followed by tmp.size bytes of
 * entries; `len' is the total setsockopt length. */
static int
do_replace(void *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct ipt_table *t;
	struct ipt_table_info *newinfo, *oldinfo;
	struct ipt_counters *counters;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check: bound the user-supplied sizes before they feed
	   the vmalloc size computations below. */
	if (tmp.size >= (INT_MAX - sizeof(struct ipt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ipt_counters))
		return -ENOMEM;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
		return -ENOMEM;

	/* One cache-aligned copy of the entries per CPU. */
	newinfo = vmalloc(sizeof(struct ipt_table_info)
			  + SMP_ALIGN(tmp.size) * smp_num_cpus);
	if (!newinfo)
		return -ENOMEM;

	if (copy_from_user(newinfo->entries, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	memset(counters, 0, tmp.num_counters * sizeof(struct ipt_counters));

	/* Validates the ruleset and resolves match/target modules (taking
	   references that cleanup_entry later drops). */
	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo_counters;

	duprintf("ip_tables: Translated table\n");

	t = ipt_find_table_lock(tmp.name, &ret, &ipt_mutex);
	if (!t)
		goto free_newinfo_counters_untrans;

	/* You lied! */
	if (tmp.valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 tmp.valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto free_newinfo_counters_untrans_unlock;
	}

	oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
	if (!oldinfo)
		goto free_newinfo_counters_untrans_unlock;

	/* Update module usage count based on number of rules: the table
	   module is pinned while it holds more rules than its built-in
	   initial set, and released when it drops back to (or below) it. */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if (t->me && (oldinfo->number <= oldinfo->initial_entries) &&
 	    (newinfo->number > oldinfo->initial_entries))
		__MOD_INC_USE_COUNT(t->me);
	else if (t->me && (oldinfo->number > oldinfo->initial_entries) &&
	 	 (newinfo->number <= oldinfo->initial_entries))
		__MOD_DEC_USE_COUNT(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	IPT_ENTRY_ITERATE(oldinfo->entries, oldinfo->size, cleanup_entry,NULL);
	vfree(oldinfo);
	/* Silent error: too late now (the new table is already live). */
	copy_to_user(tmp.counters, counters,
		     sizeof(struct ipt_counters) * tmp.num_counters);
	vfree(counters);
	up(&ipt_mutex);
	return 0;

	/* Error unwinding, innermost failure first. */
 free_newinfo_counters_untrans_unlock:
	up(&ipt_mutex);
 free_newinfo_counters_untrans:
	/* newinfo was translated but never installed: drop the module
	   references its entries hold. */
	IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size, cleanup_entry,NULL);
 free_newinfo_counters:
	vfree(counters);
 free_newinfo:
	vfree(newinfo);
	return ret;
}
1154 
1155 /* We're lazy, and add to the first CPU; overflow works its fey magic
1156  * and everything is OK. */
/* IPT_ENTRY_ITERATE callback for do_add_counters(): add addme[*i] to
 * entry `e''s counters and advance *i to the next slot.  Always returns
 * 0 so iteration continues over every entry. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct ipt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
1176 
/* IPT_SO_SET_ADD_COUNTERS: add a userspace-supplied vector of counters
 * (one per rule, in rule order) onto a table's live counters.
 * `user' points at a struct ipt_counters_info header followed by
 * num_counters counter structs; `len' is the total length. */
static int
do_add_counters(void *user, unsigned int len)
{
	unsigned int i;
	struct ipt_counters_info tmp, *paddc;
	struct ipt_table *t;
	int ret;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Length must match the header plus exactly num_counters entries. */
	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
		return -EINVAL;

	paddc = vmalloc(len);
	if (!paddc)
		return -ENOMEM;

	/* Re-copy the whole buffer so header and counters are coherent. */
	if (copy_from_user(paddc, user, len) != 0) {
		ret = -EFAULT;
		goto free;
	}

	/* ipt_find_table_lock() fills in ret on failure and returns with
	   ipt_mutex held on success. */
	t = ipt_find_table_lock(tmp.name, &ret, &ipt_mutex);
	if (!t)
		goto free;

	/* Exclude packet processing while we modify the counters. */
	write_lock_bh(&t->lock);
	if (t->private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	/* NOTE(review): ret still holds the (zero) value set by
	   ipt_find_table_lock on this success path — confirm that is the
	   intended success return. */
	i = 0;
	IPT_ENTRY_ITERATE(t->private->entries,
			  t->private->size,
			  add_counter_to_entry,
			  paddc->counters,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	up(&ipt_mutex);
 free:
	vfree(paddc);

	return ret;
}
1224 
1225 static int
do_ipt_set_ctl(struct sock * sk,int cmd,void * user,unsigned int len)1226 do_ipt_set_ctl(struct sock *sk,	int cmd, void *user, unsigned int len)
1227 {
1228 	int ret;
1229 
1230 	if (!capable(CAP_NET_ADMIN))
1231 		return -EPERM;
1232 
1233 	switch (cmd) {
1234 	case IPT_SO_SET_REPLACE:
1235 		ret = do_replace(user, len);
1236 		break;
1237 
1238 	case IPT_SO_SET_ADD_COUNTERS:
1239 		ret = do_add_counters(user, len);
1240 		break;
1241 
1242 	default:
1243 		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1244 		ret = -EINVAL;
1245 	}
1246 
1247 	return ret;
1248 }
1249 
/* getsockopt() entry point for IPT_SO_GET_*: return table metadata
 * (IPT_SO_GET_INFO) or the full ruleset plus counters
 * (IPT_SO_GET_ENTRIES).  Requires CAP_NET_ADMIN.  `*len' is the size
 * of the userspace buffer at `user'. */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO: {
		char name[IPT_TABLE_MAXNAMELEN];
		struct ipt_table *t;

		if (*len != sizeof(struct ipt_getinfo)) {
			duprintf("length %u != %u\n", *len,
				 sizeof(struct ipt_getinfo));
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(name, user, sizeof(name)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Userspace may not have terminated the name. */
		name[IPT_TABLE_MAXNAMELEN-1] = '\0';
		/* Fills in ret on failure; holds ipt_mutex on success. */
		t = ipt_find_table_lock(name, &ret, &ipt_mutex);
		if (t) {
			struct ipt_getinfo info;

			/* Snapshot the table's layout for userspace. */
			info.valid_hooks = t->valid_hooks;
			memcpy(info.hook_entry, t->private->hook_entry,
			       sizeof(info.hook_entry));
			memcpy(info.underflow, t->private->underflow,
			       sizeof(info.underflow));
			info.num_entries = t->private->number;
			info.size = t->private->size;
			memcpy(info.name, name, sizeof(info.name));

			if (copy_to_user(user, &info, *len) != 0)
				ret = -EFAULT;
			else
				ret = 0;

			up(&ipt_mutex);
		}
	}
	break;

	case IPT_SO_GET_ENTRIES: {
		struct ipt_get_entries get;

		/* Header first, then verify the caller sized the buffer
		   for exactly get.size bytes of entries. */
		if (*len < sizeof(get)) {
			duprintf("get_entries: %u < %u\n", *len, sizeof(get));
			ret = -EINVAL;
		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
			ret = -EFAULT;
		} else if (*len != sizeof(struct ipt_get_entries) + get.size) {
			duprintf("get_entries: %u != %u\n", *len,
				 sizeof(struct ipt_get_entries) + get.size);
			ret = -EINVAL;
		} else
			ret = get_entries(&get, user);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
1322 
1323 /* Registration hooks for targets. */
1324 int
ipt_register_target(struct ipt_target * target)1325 ipt_register_target(struct ipt_target *target)
1326 {
1327 	int ret;
1328 
1329 	MOD_INC_USE_COUNT;
1330 	ret = down_interruptible(&ipt_mutex);
1331 	if (ret != 0) {
1332 		MOD_DEC_USE_COUNT;
1333 		return ret;
1334 	}
1335 	if (!list_named_insert(&ipt_target, target)) {
1336 		duprintf("ipt_register_target: `%s' already in list!\n",
1337 			 target->name);
1338 		ret = -EINVAL;
1339 		MOD_DEC_USE_COUNT;
1340 	}
1341 	up(&ipt_mutex);
1342 	return ret;
1343 }
1344 
/* Remove a previously registered target and release the module use
 * count taken by ipt_register_target(). */
void
ipt_unregister_target(struct ipt_target *target)
{
	down(&ipt_mutex);
	LIST_DELETE(&ipt_target, target);
	up(&ipt_mutex);
	MOD_DEC_USE_COUNT;
}
1353 
1354 int
ipt_register_match(struct ipt_match * match)1355 ipt_register_match(struct ipt_match *match)
1356 {
1357 	int ret;
1358 
1359 	MOD_INC_USE_COUNT;
1360 	ret = down_interruptible(&ipt_mutex);
1361 	if (ret != 0) {
1362 		MOD_DEC_USE_COUNT;
1363 		return ret;
1364 	}
1365 	if (!list_named_insert(&ipt_match, match)) {
1366 		duprintf("ipt_register_match: `%s' already in list!\n",
1367 			 match->name);
1368 		MOD_DEC_USE_COUNT;
1369 		ret = -EINVAL;
1370 	}
1371 	up(&ipt_mutex);
1372 
1373 	return ret;
1374 }
1375 
/* Remove a previously registered match and release the module use
 * count taken by ipt_register_match(). */
void
ipt_unregister_match(struct ipt_match *match)
{
	down(&ipt_mutex);
	LIST_DELETE(&ipt_match, match);
	up(&ipt_mutex);
	MOD_DEC_USE_COUNT;
}
1384 
ipt_register_table(struct ipt_table * table)1385 int ipt_register_table(struct ipt_table *table)
1386 {
1387 	int ret;
1388 	struct ipt_table_info *newinfo;
1389 	static struct ipt_table_info bootstrap
1390 		= { 0, 0, 0, { 0 }, { 0 }, { } };
1391 
1392 	MOD_INC_USE_COUNT;
1393 	newinfo = vmalloc(sizeof(struct ipt_table_info)
1394 			  + SMP_ALIGN(table->table->size) * smp_num_cpus);
1395 	if (!newinfo) {
1396 		ret = -ENOMEM;
1397 		MOD_DEC_USE_COUNT;
1398 		return ret;
1399 	}
1400 	memcpy(newinfo->entries, table->table->entries, table->table->size);
1401 
1402 	ret = translate_table(table->name, table->valid_hooks,
1403 			      newinfo, table->table->size,
1404 			      table->table->num_entries,
1405 			      table->table->hook_entry,
1406 			      table->table->underflow);
1407 	if (ret != 0) {
1408 		vfree(newinfo);
1409 		MOD_DEC_USE_COUNT;
1410 		return ret;
1411 	}
1412 
1413 	ret = down_interruptible(&ipt_mutex);
1414 	if (ret != 0) {
1415 		vfree(newinfo);
1416 		MOD_DEC_USE_COUNT;
1417 		return ret;
1418 	}
1419 
1420 	/* Don't autoload: we'd eat our tail... */
1421 	if (list_named_find(&ipt_tables, table->name)) {
1422 		ret = -EEXIST;
1423 		goto free_unlock;
1424 	}
1425 
1426 	/* Simplifies replace_table code. */
1427 	table->private = &bootstrap;
1428 	if (!replace_table(table, 0, newinfo, &ret))
1429 		goto free_unlock;
1430 
1431 	duprintf("table->private->number = %u\n",
1432 		 table->private->number);
1433 
1434 	/* save number of initial entries */
1435 	table->private->initial_entries = table->private->number;
1436 
1437 	table->lock = RW_LOCK_UNLOCKED;
1438 	list_prepend(&ipt_tables, table);
1439 
1440  unlock:
1441 	up(&ipt_mutex);
1442 	return ret;
1443 
1444  free_unlock:
1445 	vfree(newinfo);
1446 	MOD_DEC_USE_COUNT;
1447 	goto unlock;
1448 }
1449 
/* Unregister a table: unlink it under the mutex, then release every
 * match/target module reference its entries hold and free the
 * per-CPU rule storage. */
void ipt_unregister_table(struct ipt_table *table)
{
	down(&ipt_mutex);
	LIST_DELETE(&ipt_tables, table);
	up(&ipt_mutex);

	/* Decrease module usage counts and free resources */
	IPT_ENTRY_ITERATE(table->private->entries, table->private->size,
			  cleanup_entry, NULL);
	vfree(table->private);
	MOD_DEC_USE_COUNT;
}
1462 
1463 /* Returns 1 if the port is matched by the range, 0 otherwise */
static inline int
port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
{
	int in_range;

	/* In-range test, with the verdict flipped when invert is set. */
	in_range = (port >= min) && (port <= max);
	return in_range ^ invert;
}
1472 
/* Scan the TCP option area for option kind `option'.
 * Returns !invert if found, invert if not.  Sets *hotdrop and returns
 * 0 if the header claims a data offset that does not fit in datalen. */
static int
tcp_find_option(u_int8_t option,
		const struct tcphdr *tcp,
		u_int16_t datalen,
		int invert,
		int *hotdrop)
{
	/* Options start immediately after the fixed header. */
	unsigned int i = sizeof(struct tcphdr);
	const u_int8_t *opt = (u_int8_t *)tcp;

	duprintf("tcp_match: finding option\n");
	/* If we don't have the whole header, drop packet. */
	if (tcp->doff * 4 < sizeof(struct tcphdr) ||
	    tcp->doff * 4 > datalen) {
		*hotdrop = 1;
		return 0;
	}

	while (i < tcp->doff * 4) {
		if (opt[i] == option) return !invert;
		/* Kinds 0 (EOL) and 1 (NOP) are single-byte options. */
		if (opt[i] < 2) i++;
		/* Otherwise advance by the length byte; the `?:' guards a
		   zero length so the loop always makes progress.
		   NOTE(review): when i == doff*4 - 1 this reads opt[i+1],
		   one byte past the option area (past datalen if
		   datalen == doff*4) — confirm this is acceptable. */
		else i += opt[i+1]?:1;
	}

	return invert;
}
1499 
/* Match callback for the built-in "tcp" match: checks source/destination
 * port ranges, TCP flags and (optionally) a TCP option against the
 * ipt_tcp rule data in `matchinfo'.  `offset' is the IP fragment offset;
 * `hdr'/`datalen' describe the transport header area. */
static int
tcp_match(const struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  const void *matchinfo,
	  int offset,
	  const void *hdr,
	  u_int16_t datalen,
	  int *hotdrop)
{
	const struct tcphdr *tcp = hdr;
	const struct ipt_tcp *tcpinfo = matchinfo;

	/* To quote Alan:

	   Don't allow a fragment of TCP 8 bytes in. Nobody normal
	   causes this. Its a cracker trying to break in by doing a
	   flag overwrite to pass the direction checks.
	*/

	if (offset == 1) {
		duprintf("Dropping evil TCP offset=1 frag.\n");
		*hotdrop = 1;
		return 0;
	} else if (offset == 0 && datalen < sizeof(struct tcphdr)) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		duprintf("Dropping evil TCP offset=0 tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	/* FIXME: Try tcp doff >> packet len against various stacks --RR */

	/* XOR the raw comparison with the corresponding invert flag. */
#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))

	/* Must not be a fragment. */
	return !offset
		&& port_match(tcpinfo->spts[0], tcpinfo->spts[1],
			      ntohs(tcp->source),
			      !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT))
		&& port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
			      ntohs(tcp->dest),
			      !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT))
		/* Byte 13 of the TCP header holds the flag bits. */
		&& FWINVTCP((((unsigned char *)tcp)[13]
			     & tcpinfo->flg_mask)
			    == tcpinfo->flg_cmp,
			    IPT_TCP_INV_FLAGS)
		&& (!tcpinfo->option
		    || tcp_find_option(tcpinfo->option, tcp, datalen,
				       tcpinfo->invflags
				       & IPT_TCP_INV_OPTION,
				       hotdrop));
}
1554 
1555 /* Called when user tries to insert an entry of this type. */
1556 static int
tcp_checkentry(const char * tablename,const struct ipt_ip * ip,void * matchinfo,unsigned int matchsize,unsigned int hook_mask)1557 tcp_checkentry(const char *tablename,
1558 	       const struct ipt_ip *ip,
1559 	       void *matchinfo,
1560 	       unsigned int matchsize,
1561 	       unsigned int hook_mask)
1562 {
1563 	const struct ipt_tcp *tcpinfo = matchinfo;
1564 
1565 	/* Must specify proto == TCP, and no unknown invflags */
1566 	return ip->proto == IPPROTO_TCP
1567 		&& !(ip->invflags & IPT_INV_PROTO)
1568 		&& matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
1569 		&& !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
1570 }
1571 
1572 static int
udp_match(const struct sk_buff * skb,const struct net_device * in,const struct net_device * out,const void * matchinfo,int offset,const void * hdr,u_int16_t datalen,int * hotdrop)1573 udp_match(const struct sk_buff *skb,
1574 	  const struct net_device *in,
1575 	  const struct net_device *out,
1576 	  const void *matchinfo,
1577 	  int offset,
1578 	  const void *hdr,
1579 	  u_int16_t datalen,
1580 	  int *hotdrop)
1581 {
1582 	const struct udphdr *udp = hdr;
1583 	const struct ipt_udp *udpinfo = matchinfo;
1584 
1585 	if (offset == 0 && datalen < sizeof(struct udphdr)) {
1586 		/* We've been asked to examine this packet, and we
1587 		   can't.  Hence, no choice but to drop. */
1588 		duprintf("Dropping evil UDP tinygram.\n");
1589 		*hotdrop = 1;
1590 		return 0;
1591 	}
1592 
1593 	/* Must not be a fragment. */
1594 	return !offset
1595 		&& port_match(udpinfo->spts[0], udpinfo->spts[1],
1596 			      ntohs(udp->source),
1597 			      !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
1598 		&& port_match(udpinfo->dpts[0], udpinfo->dpts[1],
1599 			      ntohs(udp->dest),
1600 			      !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
1601 }
1602 
1603 /* Called when user tries to insert an entry of this type. */
/* Sanity-check a user-supplied "udp" match: the rule must select
 * protocol UDP (uninverted), carry a correctly sized ipt_udp, and use
 * only known invert flags.  Returns 1 if valid, 0 otherwise. */
static int
udp_checkentry(const char *tablename,
	       const struct ipt_ip *ip,
	       void *matchinfo,
	       unsigned int matchinfosize,
	       unsigned int hook_mask)
{
	const struct ipt_udp *udpinfo = matchinfo;

	/* Must specify proto == UDP, and no unknown invflags */
	if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
		duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
			 IPPROTO_UDP);
		return 0;
	}
	if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
		duprintf("ipt_udp: matchsize %u != %u\n",
			 matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
		return 0;
	}
	if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
		duprintf("ipt_udp: unknown flags %X\n",
			 udpinfo->invflags);
		return 0;
	}

	return 1;
}
1632 
1633 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	int hit;

	/* test_type 0xFF matches any type/code. */
	if (test_type == 0xFF)
		hit = 1;
	else
		hit = (type == test_type
		       && code >= min_code && code <= max_code);
	return hit ^ invert;
}
1642 
1643 static int
icmp_match(const struct sk_buff * skb,const struct net_device * in,const struct net_device * out,const void * matchinfo,int offset,const void * hdr,u_int16_t datalen,int * hotdrop)1644 icmp_match(const struct sk_buff *skb,
1645 	   const struct net_device *in,
1646 	   const struct net_device *out,
1647 	   const void *matchinfo,
1648 	   int offset,
1649 	   const void *hdr,
1650 	   u_int16_t datalen,
1651 	   int *hotdrop)
1652 {
1653 	const struct icmphdr *icmp = hdr;
1654 	const struct ipt_icmp *icmpinfo = matchinfo;
1655 
1656 	if (offset == 0 && datalen < 2) {
1657 		/* We've been asked to examine this packet, and we
1658 		   can't.  Hence, no choice but to drop. */
1659 		duprintf("Dropping evil ICMP tinygram.\n");
1660 		*hotdrop = 1;
1661 		return 0;
1662 	}
1663 
1664 	/* Must not be a fragment. */
1665 	return !offset
1666 		&& icmp_type_code_match(icmpinfo->type,
1667 					icmpinfo->code[0],
1668 					icmpinfo->code[1],
1669 					icmp->type, icmp->code,
1670 					!!(icmpinfo->invflags&IPT_ICMP_INV));
1671 }
1672 
1673 /* Called when user tries to insert an entry of this type. */
1674 static int
icmp_checkentry(const char * tablename,const struct ipt_ip * ip,void * matchinfo,unsigned int matchsize,unsigned int hook_mask)1675 icmp_checkentry(const char *tablename,
1676 	   const struct ipt_ip *ip,
1677 	   void *matchinfo,
1678 	   unsigned int matchsize,
1679 	   unsigned int hook_mask)
1680 {
1681 	const struct ipt_icmp *icmpinfo = matchinfo;
1682 
1683 	/* Must specify proto == ICMP, and no unknown invflags */
1684 	return ip->proto == IPPROTO_ICMP
1685 		&& !(ip->invflags & IPT_INV_PROTO)
1686 		&& matchsize == IPT_ALIGN(sizeof(struct ipt_icmp))
1687 		&& !(icmpinfo->invflags & ~IPT_ICMP_INV);
1688 }
1689 
/* The built-in targets: standard (NULL) and error. */
/* NOTE(review): positional initializers — presumably
   { list, name, target fn, checkentry, destroy }; confirm against the
   ipt_target layout in linux/netfilter_ipv4/ip_tables.h. */
static struct ipt_target ipt_standard_target
= { { NULL, NULL }, IPT_STANDARD_TARGET, NULL, NULL, NULL };
static struct ipt_target ipt_error_target
= { { NULL, NULL }, IPT_ERROR_TARGET, ipt_error, NULL, NULL };

/* setsockopt/getsockopt registration: both ranges start at IPT_BASE_CTL
   and dispatch to do_ipt_set_ctl / do_ipt_get_ctl. */
static struct nf_sockopt_ops ipt_sockopts
= { { NULL, NULL }, PF_INET, IPT_BASE_CTL, IPT_SO_SET_MAX+1, do_ipt_set_ctl,
    IPT_BASE_CTL, IPT_SO_GET_MAX+1, do_ipt_get_ctl, 0, NULL  };

/* The built-in protocol matches, registered in init(). */
static struct ipt_match tcp_matchstruct
= { { NULL, NULL }, "tcp", &tcp_match, &tcp_checkentry, NULL };
static struct ipt_match udp_matchstruct
= { { NULL, NULL }, "udp", &udp_match, &udp_checkentry, NULL };
static struct ipt_match icmp_matchstruct
= { { NULL, NULL }, "icmp", &icmp_match, &icmp_checkentry, NULL };
1706 
1707 #ifdef CONFIG_PROC_FS
/* LIST_FIND callback for the /proc listings: append the name of list
 * item `i' to `buffer' once `*count' reaches `start_offset'.
 * Returns 1 to stop iterating when the buffer is full, else 0. */
static inline int print_name(const char *i,
			     off_t start_offset, char *buffer, int length,
			     off_t *pos, unsigned int *count)
{
	unsigned int namelen;

	/* Skip items before the requested start offset. */
	if ((*count)++ < start_offset)
		return 0;

	/* The name field sits immediately after the list_head link. */
	namelen = sprintf(buffer + *pos, "%s\n",
			  i + sizeof(struct list_head));
	if (*pos + namelen > length)
		return 1;	/* Buffer full: stop iterating. */

	*pos += namelen;
	return 0;
}
1725 
/* Like print_name(), but hides the two built-in targets from the
 * /proc listing. */
static inline int print_target(const struct ipt_target *t,
                               off_t start_offset, char *buffer, int length,
                               off_t *pos, unsigned int *count)
{
	if (t != &ipt_standard_target && t != &ipt_error_target)
		return print_name((char *)t, start_offset, buffer, length,
				  pos, count);
	return 0;
}
1734 
/* /proc/net/ip_tables_names read handler: one table name per line. */
static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
{
	off_t written = 0;
	unsigned int seen = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return 0;

	LIST_FIND(&ipt_tables, print_name, void *,
		  offset, buffer, length, &written, &seen);

	up(&ipt_mutex);

	/* `start' hack - see fs/proc/generic.c line ~105 */
	*start = (char *)((unsigned long)seen - offset);
	return written;
}
1752 
/* /proc/net/ip_tables_targets read handler: one target name per line
 * (built-in standard/error targets excluded by print_target). */
static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
{
	off_t written = 0;
	unsigned int seen = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return 0;

	LIST_FIND(&ipt_target, print_target, struct ipt_target *,
		  offset, buffer, length, &written, &seen);

	up(&ipt_mutex);

	/* `start' hack - see fs/proc/generic.c */
	*start = (char *)((unsigned long)seen - offset);
	return written;
}
1769 
/* /proc/net/ip_tables_matches read handler: one match name per line. */
static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
{
	off_t written = 0;
	unsigned int seen = 0;

	if (down_interruptible(&ipt_mutex) != 0)
		return 0;

	LIST_FIND(&ipt_match, print_name, void *,
		  offset, buffer, length, &written, &seen);

	up(&ipt_mutex);

	/* `start' hack - see fs/proc/generic.c */
	*start = (char *)((unsigned long)seen - offset);
	return written;
}
1786 
/* /proc/net entries registered by init() and removed by fini();
   NULL-name sentinel terminates the table. */
static struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
{ { "ip_tables_names", ipt_get_tables },
  { "ip_tables_targets", ipt_get_targets },
  { "ip_tables_matches", ipt_get_matches },
  { NULL, NULL} };
1792 #endif /*CONFIG_PROC_FS*/
1793 
/* Module init: register the built-in targets/matches, the sockopt
 * handlers and the /proc entries.  On /proc failure, already-created
 * entries and the sockopt registration are rolled back. */
static int __init init(void)
{
	int ret;

	/* Noone else will be downing sem now, so we won't sleep */
	down(&ipt_mutex);
	list_append(&ipt_target, &ipt_standard_target);
	list_append(&ipt_target, &ipt_error_target);
	list_append(&ipt_match, &tcp_matchstruct);
	list_append(&ipt_match, &udp_matchstruct);
	list_append(&ipt_match, &icmp_matchstruct);
	up(&ipt_mutex);

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0) {
		duprintf("Unable to register sockopts.\n");
		return ret;
	}

#ifdef CONFIG_PROC_FS
	{
	struct proc_dir_entry *proc;
	int i;

	for (i = 0; ipt_proc_entry[i].name; i++) {
		proc = proc_net_create(ipt_proc_entry[i].name, 0,
				       ipt_proc_entry[i].get_info);
		if (!proc) {
			/* Roll back the entries created so far. */
			while (--i >= 0)
				proc_net_remove(ipt_proc_entry[i].name);
			nf_unregister_sockopt(&ipt_sockopts);
			return -ENOMEM;
		}
		/* Pin the module while the /proc file is open. */
		proc->owner = THIS_MODULE;
	}
	}
#endif

	printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
	return 0;
}
1836 
/* Module exit: undo init()'s sockopt and /proc registrations. */
static void __exit fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);
#ifdef CONFIG_PROC_FS
	{
	int i;
	for (i = 0; ipt_proc_entry[i].name; i++)
		proc_net_remove(ipt_proc_entry[i].name);
	}
#endif
}
1848 
/* Public API exported to table modules (iptable_filter etc.) and to
   match/target extension modules. */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_register_match);
EXPORT_SYMBOL(ipt_unregister_match);
EXPORT_SYMBOL(ipt_do_table);
EXPORT_SYMBOL(ipt_register_target);
EXPORT_SYMBOL(ipt_unregister_target);
EXPORT_SYMBOL(ipt_find_target_lock);

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
1861