/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

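/* Per-CPU flag used by the packet-duplication targets (nf_dup_ipv4/ipv6)
 * to mark that the current context is already sending a duplicated skb,
 * so the duplicate is not duplicated again recursively.
 */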
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

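/* Dereference a hook blob while nf_hook_mutex is held: updates are
 * serialized by the mutex, while the packet path reads under RCU.
 */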
#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

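/* A hook blob is one allocation laid out as: the struct nf_hook_entries
 * header, then num_hook_entries struct nf_hook_entry, then
 * num_hook_entries struct nf_hook_ops pointers (the original ops, kept
 * for unregistration), then one nf_hook_entries_rcu_head used to free
 * the whole allocation after an RCU grace period.
 */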
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
	if (e)
		e->num_hook_entries = num;
	return e;
}

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

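/* Placeholder installed in place of an unregistered hook: it accepts
 * every packet, so unregistration can never fail; dummies are purged
 * later when the blob is shrunk or grown.
 */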
static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

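/* Build a new blob with room for one more hook: live entries are copied
 * from @old (dummy placeholders are dropped) and @reg is inserted in
 * ascending priority order, ahead of existing hooks of equal priority.
 */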
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that simply moves on to the next hook.
 *
 * This counts the current dummy hooks; if any are found, it allocates
 * a new blob, copies the live hooks into it, then publishes the new
 * blob at @pp in place of the old one.
 *
 * Returns the address of the old blob, which the caller must free,
 * or NULL if no shrink happened.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
#if IS_ENABLED(CONFIG_DECNET)
	case NFPROTO_DECNET:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
			return NULL;
		return net->nf.hooks_decnet + hooknum;
#endif
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (hooknum == NF_NETDEV_EGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_egress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
						  int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
						 int pf)
{
	return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}

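/* Maintain the nf_hooks_needed static keys that let the nf_hook() fast
 * path skip hook evaluation entirely when nothing is registered for a
 * given family/hooknum. NFPROTO_INET ingress hooks are accounted under
 * the netdev ingress key, matching where they are actually attached.
 */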
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}

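/* Register @reg on a single family chain: validate the request, splice
 * the hook into a newly grown blob under nf_hook_mutex, publish it with
 * rcu_assign_pointer() and free the old blob after a grace period.
 */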
static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
		if (reg->hooknum == NF_NETDEV_EGRESS)
			return -EOPNOTSUPP;
#endif
		if ((reg->hooknum != NF_NETDEV_INGRESS &&
		     reg->hooknum != NF_NETDEV_EGRESS) ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_egress_hook(reg, pf))
		net_inc_egress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current address of hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail; hook unregistration must always succeed, so the
 * to-be-removed hook is replaced with a dummy hook that just accepts.
 * Returns true if @unreg was found in the blob.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
		if (nf_egress_hook(reg, pf))
			net_dec_egress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

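/* Family-level registration: NFPROTO_INET hooks (other than ingress)
 * are transparently registered on both the IPv4 and IPv6 chains, and
 * both must succeed or the whole registration fails.
 */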
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
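
/* Minimal usage sketch (illustrative, not part of this file): register
 * a hook on the IPv4 LOCAL_IN chain. All "my_*" names are hypothetical;
 * only the netfilter structures and constants are real.
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	(let the packet continue)
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(&init_net, &my_ops);
 *	...
 *	nf_unregister_net_hook(&init_net, &my_ops);
 */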

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller, a negative
 * errno if the packet was dropped (-EPERM unless the NF_DROP verdict
 * carried an embedded error), and 0 otherwise (packet stolen or
 * queued).  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb_reason(skb,
					 SKB_DROP_REASON_NETFILTER_DROP);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
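
/* Caller-side sketch (illustrative): this mirrors what the NF_HOOK()
 * wrapper in <linux/netfilter.h> does with the return value:
 *
 *	ret = nf_hook_slow(skb, &state, e, 0);
 *	if (ret == 1)
 *		ret = state.okfn(state.net, state.sk, skb);
 *	(ret < 0: packet was dropped, skb already freed)
 *	(ret == 0: packet was stolen or queued, skb no longer ours)
 */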

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, the connection may not be
 * in the hash table, and hence manufactured ICMP or RST packets will not
 * be associated with it.
 */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;

	if (skb->_nfct) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook)
			ct_hook->attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	BUG_ON(ct_hook == NULL);
	ct_hook->destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#if IS_ENABLED(CONFIG_DECNET)
	__netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
#endif

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}