/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/netlink.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */

static struct tcf_proto_ops *tcf_proto_base __read_mostly;

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by its string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
{
	const struct tcf_proto_ops *t = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		for (t = tcf_proto_base; t; t = t->next) {
			if (nla_strcmp(kind, t->kind) == 0) {
				if (!try_module_get(t->owner))
					t = NULL;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return t;
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t, **tp;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	ops->next = NULL;
	*tp = ops;
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t, **tp;
	int rc = -ENOENT;

	write_lock(&cls_mod_lock);
	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
		if (t == ops)
			break;

	if (!t)
		goto out;
	*tp = t->next;
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
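
/*
 * Illustrative only (not part of the original file): a classifier module
 * built against this API would typically declare a struct tcf_proto_ops
 * and register it from its module init.  The names below (cls_foo_ops,
 * foo_classify, ...) are hypothetical:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.put		= foo_put,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 */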

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  unsigned long fh, int event);


/* Select a new prio value from the range managed by the kernel:
 * just below the neighbouring filter's prio, or a high default
 * (0xC0000000) when there is no neighbour.
 */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return first;
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	spinlock_t *root_lock;
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;
	u32 parent;
	struct net_device *dev;
	struct Qdisc  *q;
	struct tcf_proto **back, **chain;
	struct tcf_proto *tp;
	const struct tcf_proto_ops *tp_ops;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;
	int tp_created = 0;

replay:
	t = NLMSG_DATA(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is given, the user wants us to allocate one. */
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U, 0U);
	}

	/* Find the head of the filter chain. */

	/* Find the device */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/* Find the qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (cops->tcf_chain == NULL)
		return -EOPNOTSUPP;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the final step: get the head of the filter chain. */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for an existing proto-tcf with this priority */
	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio ||
				    (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;
			break;
		}
	}
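
	/*
	 * At this point back points at the link where a new proto-tcf would
	 * be inserted, and tp is non-NULL only if a proto-tcf with exactly
	 * the requested priority already exists.
	 */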

	root_lock = qdisc_root_sleeping_lock(q);

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;


		/* Create new proto tcf */

		err = -ENOBUFS;
		tp = kzalloc(sizeof(*tp), GFP_KERNEL);
		if (tp == NULL)
			goto errout;
		err = -ENOENT;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
		if (tp_ops == NULL) {
#ifdef CONFIG_MODULES
			struct nlattr *kind = tca[TCA_KIND];
			char name[IFNAMSIZ];

			if (kind != NULL &&
			    nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
				rtnl_unlock();
				request_module("cls_%s", name);
				rtnl_lock();
				tp_ops = tcf_proto_lookup_ops(kind);
				/* We dropped the RTNL semaphore in order to
				 * perform the module load.  So, even if we
				 * succeeded in loading the module we have to
				 * replay the request.  We indicate this using
				 * -EAGAIN.
				 */
				if (tp_ops != NULL) {
					module_put(tp_ops->owner);
					err = -EAGAIN;
				}
			}
#endif
			kfree(tp);
			goto errout;
		}
		tp->ops = tp_ops;
		tp->protocol = protocol;
		tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;

		err = tp_ops->init(tp);
		if (err != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		tp_created = 1;

	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
		goto errout;

	fh = tp->ops->get(tp, t->tcm_handle);

	if (fh == 0) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			spin_lock_bh(root_lock);
			*back = tp->next;
			spin_unlock_bh(root_lock);

			tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
			tcf_destroy(tp);
			err = 0;
			goto errout;
		}

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_destroy(tp);
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tp->ops->delete(tp, fh);
			if (err == 0)
				tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
	if (err == 0) {
		if (tp_created) {
			spin_lock_bh(root_lock);
			tp->next = *back;
			*back = tp;
			spin_unlock_bh(root_lock);
		}
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
	} else {
		if (tp_created)
			tcf_destroy(tp);
	}

errout:
	if (cl)
		cops->put(q, cl);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
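
/*
 * For reference (illustrative, not part of the original file): from
 * userspace these operations are normally driven by tc(8), roughly:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 ...  -> RTM_NEWTFILTER
 *	tc filter del dev eth0 parent 1: protocol ip prio 10 ...  -> RTM_DELTFILTER
 *	tc filter show dev eth0 parent 1:                          -> RTM_GETTFILTER (dump)
 *
 * The priority and protocol are packed into tcm_info (major/minor halves),
 * which is why TC_H_MAJ()/TC_H_MIN() are used to split it above.
 */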

static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
			 unsigned long fh, u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
	tcm->tcm_handle = fh;
	if (RTM_DELTFILTER != event) {
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  unsigned long fh, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
			 struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;

	return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
}

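/*
 * Dump state is kept in cb->args[]: args[0] indexes the proto-tcf in the
 * chain to resume from; args[1] is 0 until that proto-tcf's own entry has
 * been dumped, and thereafter 1 + the number of its filter nodes already
 * walked.
 */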
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto *tp, **chain;
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	struct tcf_dump_args arg;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return skb->len;
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto errout;
	if (cops->tcf_chain == NULL)
		goto errout;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->get(q, tcm->tcm_parent);
		if (cl == 0)
			goto errout;
	}
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL)
		goto errout;

	s_t = cb->args[0];

	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
		if (t < s_t)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				break;

			cb->args[1] = 1;
		}
		if (tp->ops->walk == NULL)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			break;
	}

	cb->args[0] = t;

errout:
	if (cl)
		cops->put(q, cl);
out:
	return skb->len;
}

void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action) {
		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
		exts->action = NULL;
	}
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
		  struct nlattr *rate_tlv, struct tcf_exts *exts,
		  const struct tcf_ext_map *map)
{
	memset(exts, 0, sizeof(*exts));

#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (map->police && tb[map->police]) {
			act = tcf_action_init_1(tb[map->police], rate_tlv,
						"police", TCA_ACT_NOREPLACE,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = TCA_OLD_COMPAT;
			exts->action = act;
		} else if (map->action && tb[map->action]) {
			act = tcf_action_init(tb[map->action], rate_tlv, NULL,
					      TCA_ACT_NOREPLACE, TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			exts->action = act;
		}
	}
#else
	if ((map->action && tb[map->action]) ||
	    (map->police && tb[map->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	if (src->action) {
		struct tc_action *act;
		tcf_tree_lock(tp);
		act = dst->action;
		dst->action = src->action;
		tcf_tree_unlock(tp);
		if (act)
			tcf_action_destroy(act, TCA_ACT_UNBIND);
	}
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
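
/*
 * Illustrative only (not part of the original file): a classifier's
 * ->change() handler typically uses the two helpers above as a pair --
 * validate the new extensions into a temporary struct, then swap them
 * into the filter under the tree lock.  Names with a "foo_" prefix are
 * hypothetical:
 *
 *	static int foo_set_parms(struct tcf_proto *tp, struct foo_filter *f,
 *				 struct nlattr **tb, struct nlattr *est)
 *	{
 *		struct tcf_exts e;
 *		int err;
 *
 *		err = tcf_exts_validate(tp, tb, est, &e, &foo_ext_map);
 *		if (err < 0)
 *			return err;
 *
 *		... parse classifier-specific attributes ...
 *
 *		tcf_exts_change(tp, &f->exts, &e);
 *		return 0;
 *	}
 */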

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
		  const struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (map->action && exts->action) {
		/*
		 * Again, for backward-compatible mode: we want to work
		 * with both old and new modes of entering tc data even
		 * if iproute2 was newer - jhs
		 */
		struct nlattr *nest;

		if (exts->action->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start(skb, map->action);
			if (nest == NULL)
				goto nla_put_failure;
			if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (map->police) {
			nest = nla_nest_start(skb, map->police);
			if (nest == NULL)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
#endif
	return 0;
nla_put_failure: __attribute__ ((unused))
	return -1;
}
EXPORT_SYMBOL(tcf_exts_dump);


int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
			const struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action)
		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
			goto nla_put_failure;
#endif
	return 0;
nla_put_failure: __attribute__ ((unused))
	return -1;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, NULL);

	return 0;
}

subsys_initcall(tc_filter_init);