// SPDX-License-Identifier: GPL-2.0-only
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
MODULE_ALIAS("ipt_icmp");

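/* Build the boot-time ruleset for a table: one unconditional policy rule
 * per hook bit set in info->valid_hooks, followed by the terminating
 * ERROR rule. The layout comes from xt_repldata.h; table modules kfree()
 * the returned blob once it has been registered.
 */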
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/* Returns whether the packet matches the rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

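	/* Address check: a rule such as "-s 10.0.0.0/8" is stored as
	 * src = 10.0.0.0 with smsk = 255.0.0.0, so the packet matches when
	 * (saddr & smsk) == src. NF_INVF() flips the result when the
	 * corresponding IPT_INV_* flag ("!" in iptables) is set.
	 */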
	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
	    NF_INVF(ipinfo, IPT_INV_DSTIP,
		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
		return false;

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
		return false;

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
		return false;

	/* Check specific protocol */
	if (ipinfo->proto &&
	    NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
		return false;

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return false */
	if (NF_INVF(ipinfo, IPT_INV_FRAG,
		    (ipinfo->flags & IPT_F_FRAG) && !isfrag))
		return false;

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK)
		return false;
	if (ip->invflags & ~IPT_INV_MASK)
		return false;
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING] = "PREROUTING",
	[NF_INET_LOCAL_IN] = "INPUT",
	[NF_INET_FORWARD] = "FORWARD",
	[NF_INET_LOCAL_OUT] = "OUTPUT",
	[NF_INET_POST_ROUTING] = "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE] = "rule",
	[NF_IP_TRACE_COMMENT_RETURN] = "return",
	[NF_IP_TRACE_COMMENT_POLICY] = "policy",
};

static const struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_DEFAULT_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
					  &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif

static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

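/* Rules are variable-length blobs packed back to back: a struct ipt_entry,
 * zero or more matches, then exactly one target. target_offset and
 * next_offset (both relative to the entry) are the only way to navigate
 * the blob, hence the helpers above.
 */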
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	const struct xt_table *table = priv;
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.state = state;

	WARN_ON(!(table->valid_hooks & (1 << hook)));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	cpu = smp_processor_id();
	table_base = private->entries;
	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		WARN_ON(!e);
		if (!ip_packet_match(ip, indev, outdev,
				     &e->ip, acpar.fragoff)) {
no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target_c(e);
		WARN_ON(!t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
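		/* Verdict encoding: a standard target stores absolute
		 * verdicts as -verdict - 1 (NF_ACCEPT == 0 becomes -1),
		 * leaving non-negative values to mean "jump to this offset"
		 * and XT_RETURN to mean "pop the jumpstack".
		 */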
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (unlikely(stackidx >= private->stacksize)) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE) {
			/* Target might have changed stuff. */
			ip = ip_hdr(skb);
			e = ipt_next_entry(e);
		} else {
			/* Verdict */
			break;
		}
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
}

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask */
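	/* This is an iterative depth-first walk of the rule graph:
	 * e->counters.pcnt temporarily holds the offset we came from, and
	 * the extra (1 << NF_INET_NUMHOOKS) bit in e->comefrom marks an
	 * entry on the current path (it is cleared while backtracking), so
	 * meeting a marked entry again means the ruleset loops.
	 */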
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = entry0 + pos;

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
				return 0;

			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				 * big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = entry0 + pos;
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = entry0 + pos + size;
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This is a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = entry0 + newpos;
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}

static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net = net;
	par.match = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;

	par->match = m->u.kernel.match;
	par->matchinfo = m->data;

	return xt_check_match(par, m->u.match_size - sizeof(*m),
			      ip->proto, ip->invflags & IPT_INV_PROTO);
}

static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net = net,
		.table = name,
		.entryinfo = e,
		.target = t->u.kernel.target,
		.targinfo = t->data,
		.hook_mask = e->comefrom,
		.family = NFPROTO_IPV4,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t),
			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	j = 0;
	memset(&mtpar, 0, sizeof(mtpar));
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;

	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(&e->counters);

	return ret;
}

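/* Base chain policies live at the table's "underflow" offsets. A valid
 * policy must be an unconditional standard target whose verdict decodes
 * (via -verdict - 1) to NF_DROP or NF_ACCEPT; anything else would let
 * userspace disguise a jump or RETURN as a chain policy.
 */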
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net = net;
	par.target = t->u.kernel.target;
	par.targinfo = t->data;
	par.family = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}

/* Checks and translates the user-supplied table segment (held in
 * newinfo) */
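/* Three passes: (1) walk the entries checking sizes, alignment and the
 * hook/underflow offsets; (2) reject rulesets whose jump graph contains
 * loops via mark_source_chains(); (3) run every entry's match/target
 * checkentry hooks, unwinding the already-checked prefix on failure.
 */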
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct ipt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
			   XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
	if (ret)
		goto out_free;

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
out_free:
	kvfree(offsets);
	return ret;
}

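/* Snapshot all per-cpu counters into a single array. Readers use the
 * per-cpu xt_recseq sequence counter to retry if the packet path updated
 * a counter mid-read, so the 64-bit bcnt/pcnt pairs come out consistent
 * even on 32-bit SMP, without ever stopping traffic.
 */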
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
			cond_resched();
		}
	}
}

static void get_old_counters(const struct xt_table_info *t,
			     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			const struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
			++i; /* macro does multi eval of i */
		}

		cond_resched();
	}
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = loc_cpu_entry + off;
		if (copy_to_user(userptr + off, e, sizeof(*e))) {
			ret = -EFAULT;
			goto free_counters;
		}
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (xt_match_to_user(m, userptr + off + i)) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
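/* Compat layer: lets 32-bit iptables binaries manage tables on a 64-bit
 * kernel. Because 32-bit entries are smaller, every stored jump offset
 * must be shifted by the accumulated per-entry size deltas that
 * xt_compat_calc_jump() looks up; the *_from_user/*_to_user helpers below
 * apply that shift in each direction.
 */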
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	ret = xt_compat_init_offsets(AF_INET, info->number);
	if (ret)
		return ret;
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

static int get_info(struct net *net, void __user *user, const int *len)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	if (in_compat_syscall())
		xt_compat_lock(AF_INET);
#endif
	t = xt_request_find_table_lock(net, AF_INET, name);
	if (!IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		struct xt_table_info tmp;

		if (in_compat_syscall()) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = PTR_ERR(t);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	if (in_compat_syscall())
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size)
		return -EINVAL;
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ipt_entry *iter;

	ret = 0;
	counters = xt_counters_alloc(num_counters);
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = xt_request_find_table_lock(net, AF_INET, name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	xt_table_unlock(t);

	get_old_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
				     tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ipt_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters(arg, len, &tmp);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, AF_INET, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_ipt_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters; /* struct xt_counters * */
	struct compat_ipt_entry entries[];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct xt_entry_match *m,
		       const struct ipt_ip *ip,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);

	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ip, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ipt_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = *dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);

	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}

static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ipt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;
	xt_compat_lock(AF_INET);
	ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
	if (ret)
		goto out_unlock;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	memset(newinfo->entries, 0, size);

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone.
	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
	 * generated by 64bit userspace.
	 *
	 * Call standard translate_table() to validate all hook_entrys,
	 * underflows, check for loops, etc.
	 */
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}

static int
compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
				     tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	xt_compat_unlock(AF_INET);
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		if (in_compat_syscall())
			ret = compat_do_replace(sock_net(sk), arg, len);
		else
#endif
			ret = do_replace(sock_net(sk), arg, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), arg, len);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_ENTRIES:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		if (in_compat_syscall())
			ret = compat_get_entries(sock_net(sk), user, len);
		else
#endif
			ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}

static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

int ipt_register_table(struct net *net, const struct xt_table *table,
		       const struct ipt_replace *repl,
		       const struct nf_hook_ops *template_ops)
{
	struct nf_hook_ops *ops;
	unsigned int num_ops;
	int ret, i;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		xt_free_table_info(newinfo);
		return PTR_ERR(new_table);
	}

	/* No template? No need to do anything. This is used by the 'nat'
	 * table; it registers with the nat core instead of the netfilter
	 * core.
	 */
	if (!template_ops)
		return 0;

	num_ops = hweight32(table->valid_hooks);
	if (num_ops == 0) {
		ret = -EINVAL;
		goto out_free;
	}

	ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
	if (!ops) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < num_ops; i++)
		ops[i].priv = new_table;

	new_table->ops = ops;

	ret = nf_register_net_hooks(net, ops, num_ops);
	if (ret != 0)
		goto out_free;

	return ret;

out_free:
	__ipt_unregister_table(net, new_table);
	return ret;
}

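/* A minimal usage sketch, loosely modeled on iptable_filter (the
 * packet_filter table and filter_ops names are illustrative, not part of
 * this file):
 *
 *	repl = ipt_alloc_initial_table(&packet_filter);
 *	if (!repl)
 *		return -ENOMEM;
 *	err = ipt_register_table(net, &packet_filter, repl, filter_ops);
 *	kfree(repl);
 *
 * paired with ipt_unregister_table_pre_exit() and
 * ipt_unregister_table_exit() from the pernet pre_exit/exit callbacks.
 */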
void ipt_unregister_table_pre_exit(struct net *net, const char *name)
{
	struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);

	if (table)
		nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
}

void ipt_unregister_table_exit(struct net *net, const char *name)
{
	struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);

	if (table)
		__ipt_unregister_table(net, table);
}

/* Returns 1 if the type and code are matched by the range, 0 otherwise */
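/* test_type 0xFF is the wildcard userspace emits for "any ICMP type";
 * otherwise the type must match exactly and the code fall inside
 * [min_code, max_code]. E.g. "--icmp-type 3/1" yields test_type = 3,
 * min_code = max_code = 1. invert ("!") flips the result.
 */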
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags & IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

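/* The two built-in targets. The standard target deliberately has no
 * ->target() hook: its verdicts are interpreted inline by the
 * "if (!t->u.kernel.target->target)" fast path in ipt_do_table(). ERROR
 * entries mark user-defined chain heads and the table terminator.
 */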
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name = XT_STANDARD_TARGET,
		.targetsize = sizeof(int),
		.family = NFPROTO_IPV4,
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		.compatsize = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user = compat_standard_to_user,
#endif
	},
	{
		.name = XT_ERROR_TARGET,
		.target = ipt_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family = NFPROTO_IPV4,
	},
};

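/* Userspace ABI: iptables(8) drives this module through {get,set}sockopt()
 * on an ordinary IPv4 socket, using the IPT_SO_* commands dispatched by
 * do_ipt_set_ctl()/do_ipt_get_ctl() above.
 */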
static struct nf_sockopt_ops ipt_sockopts = {
	.pf = PF_INET,
	.set_optmin = IPT_BASE_CTL,
	.set_optmax = IPT_SO_SET_MAX+1,
	.set = do_ipt_set_ctl,
	.get_optmin = IPT_BASE_CTL,
	.get_optmax = IPT_SO_GET_MAX+1,
	.get = do_ipt_get_ctl,
	.owner = THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name = "icmp",
		.match = icmp_match,
		.matchsize = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto = IPPROTO_ICMP,
		.family = NFPROTO_IPV4,
		.me = THIS_MODULE,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
EXPORT_SYMBOL(ipt_unregister_table_exit);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);