1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		ROUTE - implementation of the IP router.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
13  *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
14  *
15  * Fixes:
16  *		Alan Cox	:	Verify area fixes.
17  *		Alan Cox	:	cli() protects routing changes
18  *		Rui Oliveira	:	ICMP routing table updates
19  *		(rco@di.uminho.pt)	Routing table insertion and update
20  *		Linus Torvalds	:	Rewrote bits to be sensible
21  *		Alan Cox	:	Added BSD route gw semantics
22  *		Alan Cox	:	Super /proc >4K
23  *		Alan Cox	:	MTU in route table
24  *		Alan Cox	:	MSS actually. Also added the window
25  *					clamper.
26  *		Sam Lantinga	:	Fixed route matching in rt_del()
27  *		Alan Cox	:	Routing cache support.
28  *		Alan Cox	:	Removed compatibility cruft.
29  *		Alan Cox	:	RTF_REJECT support.
30  *		Alan Cox	:	TCP irtt support.
31  *		Jonathan Naylor	:	Added Metric support.
32  *	Miquel van Smoorenburg	:	BSD API fixes.
33  *	Miquel van Smoorenburg	:	Metrics.
34  *		Alan Cox	:	Use __u32 properly
35  *		Alan Cox	:	Aligned routing errors more closely with BSD
36  *					though our system is still very different.
37  *		Alan Cox	:	Faster /proc handling
38  *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
39  *					routing caches and better behaviour.
40  *
41  *		Olaf Erb	:	irtt wasn't being copied right.
42  *		Bjorn Ekwall	:	Kerneld route support.
43  *		Alan Cox	:	Multicast fixed (I hope)
44  *		Pavel Krauz	:	Limited broadcast fixed
45  *		Mike McLagan	:	Routing by source
46  *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
47  *					route.c and rewritten from scratch.
48  *		Andi Kleen	:	Load-limit warning messages.
49  *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
50  *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
51  *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
52  *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
53  *		Marc Boucher	:	routing by fwmark
54  *	Robert Olsson		:	Added rt_cache statistics
55  *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
56  *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
57  *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
58  *	Ilia Sotnikov		:	Removed TOS from hash calculations
59  */
60 
61 #define pr_fmt(fmt) "IPv4: " fmt
62 
63 #include <linux/module.h>
64 #include <linux/bitops.h>
65 #include <linux/kernel.h>
66 #include <linux/mm.h>
67 #include <linux/memblock.h>
68 #include <linux/socket.h>
69 #include <linux/errno.h>
70 #include <linux/in.h>
71 #include <linux/inet.h>
72 #include <linux/netdevice.h>
73 #include <linux/proc_fs.h>
74 #include <linux/init.h>
75 #include <linux/skbuff.h>
76 #include <linux/inetdevice.h>
77 #include <linux/igmp.h>
78 #include <linux/pkt_sched.h>
79 #include <linux/mroute.h>
80 #include <linux/netfilter_ipv4.h>
81 #include <linux/random.h>
82 #include <linux/rcupdate.h>
83 #include <linux/slab.h>
84 #include <linux/jhash.h>
85 #include <net/dst.h>
86 #include <net/dst_metadata.h>
87 #include <net/inet_dscp.h>
88 #include <net/net_namespace.h>
89 #include <net/ip.h>
90 #include <net/route.h>
91 #include <net/inetpeer.h>
92 #include <net/sock.h>
93 #include <net/ip_fib.h>
94 #include <net/nexthop.h>
95 #include <net/tcp.h>
96 #include <net/icmp.h>
97 #include <net/xfrm.h>
98 #include <net/lwtunnel.h>
99 #include <net/netevent.h>
100 #include <net/rtnetlink.h>
101 #ifdef CONFIG_SYSCTL
102 #include <linux/sysctl.h>
103 #endif
104 #include <net/secure_seq.h>
105 #include <net/ip_tunnels.h>
106 
107 #include "fib_lookup.h"
108 
109 #define RT_FL_TOS(oldflp4) \
110 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
111 
112 #define RT_GC_TIMEOUT (300*HZ)
113 
114 #define DEFAULT_MIN_PMTU (512 + 20 + 20)
115 #define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
116 #define DEFAULT_MIN_ADVMSS 256
117 static int ip_rt_max_size;
118 static int ip_rt_redirect_number __read_mostly	= 9;
119 static int ip_rt_redirect_load __read_mostly	= HZ / 50;
120 static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
121 static int ip_rt_error_cost __read_mostly	= HZ;
122 static int ip_rt_error_burst __read_mostly	= 5 * HZ;
123 
124 static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
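/* For orientation, the jiffies-based defaults above translate to wall-clock
 * time as follows (independent of the value of HZ):
 *
 *	RT_GC_TIMEOUT		= 300 * HZ		-> 5 minutes
 *	DEFAULT_MTU_EXPIRES	= 10 * 60 * HZ		-> 10 minutes
 *	ip_rt_redirect_load	= HZ / 50		-> 20 ms
 *	ip_rt_redirect_silence	= (HZ / 50) << 10	-> ~20.5 seconds
 *	ip_rt_error_burst	= 5 * HZ		-> 5 seconds' worth of tokens
 *	ip_rt_error_cost	= HZ			-> 1 second per ICMP error
 */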
125 
126 /*
127  *	Interface to generic destination cache.
128  */
129 
130 INDIRECT_CALLABLE_SCOPE
131 struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
132 static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
133 INDIRECT_CALLABLE_SCOPE
134 unsigned int		ipv4_mtu(const struct dst_entry *dst);
135 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
136 static void		 ipv4_link_failure(struct sk_buff *skb);
137 static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
138 					   struct sk_buff *skb, u32 mtu,
139 					   bool confirm_neigh);
140 static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
141 					struct sk_buff *skb);
142 static void		ipv4_dst_destroy(struct dst_entry *dst);
143 
144 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
145 {
146 	WARN_ON(1);
147 	return NULL;
148 }
149 
150 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
151 					   struct sk_buff *skb,
152 					   const void *daddr);
153 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
154 
155 static struct dst_ops ipv4_dst_ops = {
156 	.family =		AF_INET,
157 	.check =		ipv4_dst_check,
158 	.default_advmss =	ipv4_default_advmss,
159 	.mtu =			ipv4_mtu,
160 	.cow_metrics =		ipv4_cow_metrics,
161 	.destroy =		ipv4_dst_destroy,
162 	.negative_advice =	ipv4_negative_advice,
163 	.link_failure =		ipv4_link_failure,
164 	.update_pmtu =		ip_rt_update_pmtu,
165 	.redirect =		ip_do_redirect,
166 	.local_out =		__ip_local_out,
167 	.neigh_lookup =		ipv4_neigh_lookup,
168 	.confirm_neigh =	ipv4_confirm_neigh,
169 };
170 
171 #define ECN_OR_COST(class)	TC_PRIO_##class
172 
173 const __u8 ip_tos2prio[16] = {
174 	TC_PRIO_BESTEFFORT,
175 	ECN_OR_COST(BESTEFFORT),
176 	TC_PRIO_BESTEFFORT,
177 	ECN_OR_COST(BESTEFFORT),
178 	TC_PRIO_BULK,
179 	ECN_OR_COST(BULK),
180 	TC_PRIO_BULK,
181 	ECN_OR_COST(BULK),
182 	TC_PRIO_INTERACTIVE,
183 	ECN_OR_COST(INTERACTIVE),
184 	TC_PRIO_INTERACTIVE,
185 	ECN_OR_COST(INTERACTIVE),
186 	TC_PRIO_INTERACTIVE_BULK,
187 	ECN_OR_COST(INTERACTIVE_BULK),
188 	TC_PRIO_INTERACTIVE_BULK,
189 	ECN_OR_COST(INTERACTIVE_BULK)
190 };
191 EXPORT_SYMBOL(ip_tos2prio);
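/* The table above is indexed by the four IPv4 TOS bits shifted right by one,
 * with the low bit of the index selecting the ECN_OR_COST() variant. A
 * minimal usage sketch, mirroring rt_tos2priority() from <net/route.h>:
 *
 *	u8 tos = ip_hdr(skb)->tos;
 *	u32 prio = ip_tos2prio[IPTOS_TOS(tos) >> 1];
 */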
192 
193 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
194 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
195 
196 #ifdef CONFIG_PROC_FS
197 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
198 {
199 	if (*pos)
200 		return NULL;
201 	return SEQ_START_TOKEN;
202 }
203 
204 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
205 {
206 	++*pos;
207 	return NULL;
208 }
209 
210 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
211 {
212 }
213 
214 static int rt_cache_seq_show(struct seq_file *seq, void *v)
215 {
216 	if (v == SEQ_START_TOKEN)
217 		seq_printf(seq, "%-127s\n",
218 			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
219 			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
220 			   "HHUptod\tSpecDst");
221 	return 0;
222 }
223 
224 static const struct seq_operations rt_cache_seq_ops = {
225 	.start  = rt_cache_seq_start,
226 	.next   = rt_cache_seq_next,
227 	.stop   = rt_cache_seq_stop,
228 	.show   = rt_cache_seq_show,
229 };
230 
231 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
232 {
233 	int cpu;
234 
235 	if (*pos == 0)
236 		return SEQ_START_TOKEN;
237 
238 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
239 		if (!cpu_possible(cpu))
240 			continue;
241 		*pos = cpu+1;
242 		return &per_cpu(rt_cache_stat, cpu);
243 	}
244 	return NULL;
245 }
246 
247 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
248 {
249 	int cpu;
250 
251 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
252 		if (!cpu_possible(cpu))
253 			continue;
254 		*pos = cpu+1;
255 		return &per_cpu(rt_cache_stat, cpu);
256 	}
257 	(*pos)++;
258 	return NULL;
259 
260 }
261 
262 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
263 {
264 
265 }
266 
267 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
268 {
269 	struct rt_cache_stat *st = v;
270 
271 	if (v == SEQ_START_TOKEN) {
272 		seq_puts(seq, "entries  in_hit   in_slow_tot in_slow_mc in_no_route in_brd   in_martian_dst in_martian_src out_hit  out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
273 		return 0;
274 	}
275 
276 	seq_printf(seq, "%08x %08x %08x    %08x   %08x    %08x %08x       "
277 			"%08x       %08x %08x     %08x    %08x %08x   "
278 			"%08x     %08x        %08x        %08x\n",
279 		   dst_entries_get_slow(&ipv4_dst_ops),
280 		   0, /* st->in_hit */
281 		   st->in_slow_tot,
282 		   st->in_slow_mc,
283 		   st->in_no_route,
284 		   st->in_brd,
285 		   st->in_martian_dst,
286 		   st->in_martian_src,
287 
288 		   0, /* st->out_hit */
289 		   st->out_slow_tot,
290 		   st->out_slow_mc,
291 
292 		   0, /* st->gc_total */
293 		   0, /* st->gc_ignored */
294 		   0, /* st->gc_goal_miss */
295 		   0, /* st->gc_dst_overflow */
296 		   0, /* st->in_hlist_search */
297 		   0  /* st->out_hlist_search */
298 		);
299 	return 0;
300 }
301 
302 static const struct seq_operations rt_cpu_seq_ops = {
303 	.start  = rt_cpu_seq_start,
304 	.next   = rt_cpu_seq_next,
305 	.stop   = rt_cpu_seq_stop,
306 	.show   = rt_cpu_seq_show,
307 };
308 
309 #ifdef CONFIG_IP_ROUTE_CLASSID
310 static int rt_acct_proc_show(struct seq_file *m, void *v)
311 {
312 	struct ip_rt_acct *dst, *src;
313 	unsigned int i, j;
314 
315 	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
316 	if (!dst)
317 		return -ENOMEM;
318 
319 	for_each_possible_cpu(i) {
320 		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
321 		for (j = 0; j < 256; j++) {
322 			dst[j].o_bytes   += src[j].o_bytes;
323 			dst[j].o_packets += src[j].o_packets;
324 			dst[j].i_bytes   += src[j].i_bytes;
325 			dst[j].i_packets += src[j].i_packets;
326 		}
327 	}
328 
329 	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
330 	kfree(dst);
331 	return 0;
332 }
333 #endif
334 
335 static int __net_init ip_rt_do_proc_init(struct net *net)
336 {
337 	struct proc_dir_entry *pde;
338 
339 	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
340 			      &rt_cache_seq_ops);
341 	if (!pde)
342 		goto err1;
343 
344 	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
345 			      &rt_cpu_seq_ops);
346 	if (!pde)
347 		goto err2;
348 
349 #ifdef CONFIG_IP_ROUTE_CLASSID
350 	pde = proc_create_single("rt_acct", 0, net->proc_net,
351 			rt_acct_proc_show);
352 	if (!pde)
353 		goto err3;
354 #endif
355 	return 0;
356 
357 #ifdef CONFIG_IP_ROUTE_CLASSID
358 err3:
359 	remove_proc_entry("rt_cache", net->proc_net_stat);
360 #endif
361 err2:
362 	remove_proc_entry("rt_cache", net->proc_net);
363 err1:
364 	return -ENOMEM;
365 }
366 
367 static void __net_exit ip_rt_do_proc_exit(struct net *net)
368 {
369 	remove_proc_entry("rt_cache", net->proc_net_stat);
370 	remove_proc_entry("rt_cache", net->proc_net);
371 #ifdef CONFIG_IP_ROUTE_CLASSID
372 	remove_proc_entry("rt_acct", net->proc_net);
373 #endif
374 }
375 
376 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
377 	.init = ip_rt_do_proc_init,
378 	.exit = ip_rt_do_proc_exit,
379 };
380 
381 static int __init ip_rt_proc_init(void)
382 {
383 	return register_pernet_subsys(&ip_rt_proc_ops);
384 }
385 
386 #else
387 static inline int ip_rt_proc_init(void)
388 {
389 	return 0;
390 }
391 #endif /* CONFIG_PROC_FS */
392 
393 static inline bool rt_is_expired(const struct rtable *rth)
394 {
395 	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
396 }
397 
398 void rt_cache_flush(struct net *net)
399 {
400 	rt_genid_bump_ipv4(net);
401 }
402 
403 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
404 					   struct sk_buff *skb,
405 					   const void *daddr)
406 {
407 	const struct rtable *rt = container_of(dst, struct rtable, dst);
408 	struct net_device *dev = dst->dev;
409 	struct neighbour *n;
410 
411 	rcu_read_lock_bh();
412 
413 	if (likely(rt->rt_gw_family == AF_INET)) {
414 		n = ip_neigh_gw4(dev, rt->rt_gw4);
415 	} else if (rt->rt_gw_family == AF_INET6) {
416 		n = ip_neigh_gw6(dev, &rt->rt_gw6);
417 	} else {
418 		__be32 pkey;
419 
420 		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
421 		n = ip_neigh_gw4(dev, pkey);
422 	}
423 
424 	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
425 		n = NULL;
426 
427 	rcu_read_unlock_bh();
428 
429 	return n;
430 }
431 
432 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
433 {
434 	const struct rtable *rt = container_of(dst, struct rtable, dst);
435 	struct net_device *dev = dst->dev;
436 	const __be32 *pkey = daddr;
437 
438 	if (rt->rt_gw_family == AF_INET) {
439 		pkey = (const __be32 *)&rt->rt_gw4;
440 	} else if (rt->rt_gw_family == AF_INET6) {
441 		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
442 	} else if (!daddr ||
443 		 (rt->rt_flags &
444 		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
445 		return;
446 	}
447 	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
448 }
449 
450 /* Hash tables of size 2048..262144 depending on RAM size.
451  * Each bucket uses 8 bytes.
452  */
453 static u32 ip_idents_mask __read_mostly;
454 static atomic_t *ip_idents __read_mostly;
455 static u32 *ip_tstamps __read_mostly;
456 
457 /* In order to protect privacy, we add a perturbation to identifiers
458  * if one generator is seldom used. This makes it hard for an attacker
459  * to infer how many packets were sent between two points in time.
460  */
461 static u32 ip_idents_reserve(u32 hash, int segs)
462 {
463 	u32 bucket, old, now = (u32)jiffies;
464 	atomic_t *p_id;
465 	u32 *p_tstamp;
466 	u32 delta = 0;
467 
468 	bucket = hash & ip_idents_mask;
469 	p_tstamp = ip_tstamps + bucket;
470 	p_id = ip_idents + bucket;
471 	old = READ_ONCE(*p_tstamp);
472 
473 	if (old != now && cmpxchg(p_tstamp, old, now) == old)
474 		delta = prandom_u32_max(now - old);
475 
476 	/* If UBSAN reports an error there, please make sure your compiler
477 	 * supports -fno-strict-overflow before reporting it. That was a bug
478 	 * in UBSAN, and it has been fixed in GCC-8.
479 	 */
480 	return atomic_add_return(segs + delta, p_id) - segs;
481 }
482 
483 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
484 {
485 	u32 hash, id;
486 
487 	/* Note the following code is not safe, but this is okay. */
488 	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
489 		get_random_bytes(&net->ipv4.ip_id_key,
490 				 sizeof(net->ipv4.ip_id_key));
491 
492 	hash = siphash_3u32((__force u32)iph->daddr,
493 			    (__force u32)iph->saddr,
494 			    iph->protocol,
495 			    &net->ipv4.ip_id_key);
496 	id = ip_idents_reserve(hash, segs);
497 	iph->id = htons(id);
498 }
499 EXPORT_SYMBOL(__ip_select_ident);
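/* A worked sketch of the scheme above: for a GSO packet that will become
 * segs=3 wire packets, (daddr, saddr, protocol) selects one bucket, whose
 * counter is atomically advanced by 3 -- plus a random delta in
 * [0, idle-jiffies) if the bucket sat unused for at least one jiffy -- and
 * the caller stamps the segments with consecutive IDs starting at the
 * returned value. The random delta is what keeps an observer from inferring
 * how many packets were sent between two probes of the same bucket.
 */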
500 
501 static void ip_rt_fix_tos(struct flowi4 *fl4)
502 {
503 	__u8 tos = RT_FL_TOS(fl4);
504 
505 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
506 	if (tos & RTO_ONLINK)
507 		fl4->flowi4_scope = RT_SCOPE_LINK;
508 }
509 
510 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
511 			     const struct sock *sk, const struct iphdr *iph,
512 			     int oif, __u8 tos, u8 prot, u32 mark,
513 			     int flow_flags)
514 {
515 	__u8 scope = RT_SCOPE_UNIVERSE;
516 
517 	if (sk) {
518 		const struct inet_sock *inet = inet_sk(sk);
519 
520 		oif = sk->sk_bound_dev_if;
521 		mark = sk->sk_mark;
522 		tos = ip_sock_rt_tos(sk);
523 		scope = ip_sock_rt_scope(sk);
524 		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
525 	}
526 
527 	flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
528 			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
529 			   sock_net_uid(net, sk));
530 }
531 
532 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
533 			       const struct sock *sk)
534 {
535 	const struct net *net = dev_net(skb->dev);
536 	const struct iphdr *iph = ip_hdr(skb);
537 	int oif = skb->dev->ifindex;
538 	u8 prot = iph->protocol;
539 	u32 mark = skb->mark;
540 	__u8 tos = iph->tos;
541 
542 	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
543 }
544 
545 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
546 {
547 	const struct inet_sock *inet = inet_sk(sk);
548 	const struct ip_options_rcu *inet_opt;
549 	__be32 daddr = inet->inet_daddr;
550 
551 	rcu_read_lock();
552 	inet_opt = rcu_dereference(inet->inet_opt);
553 	if (inet_opt && inet_opt->opt.srr)
554 		daddr = inet_opt->opt.faddr;
555 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
556 			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
557 			   ip_sock_rt_scope(sk),
558 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
559 			   inet_sk_flowi_flags(sk),
560 			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
561 	rcu_read_unlock();
562 }
563 
564 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
565 				 const struct sk_buff *skb)
566 {
567 	if (skb)
568 		build_skb_flow_key(fl4, skb, sk);
569 	else
570 		build_sk_flow_key(fl4, sk);
571 }
572 
573 static DEFINE_SPINLOCK(fnhe_lock);
574 
575 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
576 {
577 	struct rtable *rt;
578 
579 	rt = rcu_dereference(fnhe->fnhe_rth_input);
580 	if (rt) {
581 		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
582 		dst_dev_put(&rt->dst);
583 		dst_release(&rt->dst);
584 	}
585 	rt = rcu_dereference(fnhe->fnhe_rth_output);
586 	if (rt) {
587 		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
588 		dst_dev_put(&rt->dst);
589 		dst_release(&rt->dst);
590 	}
591 }
592 
593 static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
594 {
595 	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
596 	struct fib_nh_exception *fnhe, *oldest = NULL;
597 
598 	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
599 		fnhe = rcu_dereference_protected(*fnhe_p,
600 						 lockdep_is_held(&fnhe_lock));
601 		if (!fnhe)
602 			break;
603 		if (!oldest ||
604 		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
605 			oldest = fnhe;
606 			oldest_p = fnhe_p;
607 		}
608 	}
609 	fnhe_flush_routes(oldest);
610 	*oldest_p = oldest->fnhe_next;
611 	kfree_rcu(oldest, rcu);
612 }
613 
614 static u32 fnhe_hashfun(__be32 daddr)
615 {
616 	static siphash_aligned_key_t fnhe_hash_key;
617 	u64 hval;
618 
619 	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
620 	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
621 	return hash_64(hval, FNHE_HASH_SHIFT);
622 }
623 
624 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
625 {
626 	rt->rt_pmtu = fnhe->fnhe_pmtu;
627 	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
628 	rt->dst.expires = fnhe->fnhe_expires;
629 
630 	if (fnhe->fnhe_gw) {
631 		rt->rt_flags |= RTCF_REDIRECTED;
632 		rt->rt_uses_gateway = 1;
633 		rt->rt_gw_family = AF_INET;
634 		rt->rt_gw4 = fnhe->fnhe_gw;
635 	}
636 }
637 
638 static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
639 				  __be32 gw, u32 pmtu, bool lock,
640 				  unsigned long expires)
641 {
642 	struct fnhe_hash_bucket *hash;
643 	struct fib_nh_exception *fnhe;
644 	struct rtable *rt;
645 	u32 genid, hval;
646 	unsigned int i;
647 	int depth;
648 
649 	genid = fnhe_genid(dev_net(nhc->nhc_dev));
650 	hval = fnhe_hashfun(daddr);
651 
652 	spin_lock_bh(&fnhe_lock);
653 
654 	hash = rcu_dereference(nhc->nhc_exceptions);
655 	if (!hash) {
656 		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
657 		if (!hash)
658 			goto out_unlock;
659 		rcu_assign_pointer(nhc->nhc_exceptions, hash);
660 	}
661 
662 	hash += hval;
663 
664 	depth = 0;
665 	for (fnhe = rcu_dereference(hash->chain); fnhe;
666 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
667 		if (fnhe->fnhe_daddr == daddr)
668 			break;
669 		depth++;
670 	}
671 
672 	if (fnhe) {
673 		if (fnhe->fnhe_genid != genid)
674 			fnhe->fnhe_genid = genid;
675 		if (gw)
676 			fnhe->fnhe_gw = gw;
677 		if (pmtu) {
678 			fnhe->fnhe_pmtu = pmtu;
679 			fnhe->fnhe_mtu_locked = lock;
680 		}
681 		fnhe->fnhe_expires = max(1UL, expires);
682 		/* Update all cached dsts too */
683 		rt = rcu_dereference(fnhe->fnhe_rth_input);
684 		if (rt)
685 			fill_route_from_fnhe(rt, fnhe);
686 		rt = rcu_dereference(fnhe->fnhe_rth_output);
687 		if (rt)
688 			fill_route_from_fnhe(rt, fnhe);
689 	} else {
690 		/* Randomize max depth to avoid some side channel attacks. */
691 		int max_depth = FNHE_RECLAIM_DEPTH +
692 				prandom_u32_max(FNHE_RECLAIM_DEPTH);
693 
694 		while (depth > max_depth) {
695 			fnhe_remove_oldest(hash);
696 			depth--;
697 		}
698 
699 		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
700 		if (!fnhe)
701 			goto out_unlock;
702 
703 		fnhe->fnhe_next = hash->chain;
704 
705 		fnhe->fnhe_genid = genid;
706 		fnhe->fnhe_daddr = daddr;
707 		fnhe->fnhe_gw = gw;
708 		fnhe->fnhe_pmtu = pmtu;
709 		fnhe->fnhe_mtu_locked = lock;
710 		fnhe->fnhe_expires = max(1UL, expires);
711 
712 		rcu_assign_pointer(hash->chain, fnhe);
713 
714 		/* Exception created; mark the cached routes for the nexthop
715 		 * stale, so anyone caching it rechecks if this exception
716 		 * applies to them.
717 		 */
718 		rt = rcu_dereference(nhc->nhc_rth_input);
719 		if (rt)
720 			rt->dst.obsolete = DST_OBSOLETE_KILL;
721 
722 		for_each_possible_cpu(i) {
723 			struct rtable __rcu **prt;
724 
725 			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
726 			rt = rcu_dereference(*prt);
727 			if (rt)
728 				rt->dst.obsolete = DST_OBSOLETE_KILL;
729 		}
730 	}
731 
732 	fnhe->fnhe_stamp = jiffies;
733 
734 out_unlock:
735 	spin_unlock_bh(&fnhe_lock);
736 }
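/* Both users of update_or_create_fnhe() are in this file: __ip_do_redirect()
 * records a learned gateway with a jiffies + ip_rt_gc_timeout expiry, and
 * __ip_rt_update_pmtu() records a learned path MTU with a
 * jiffies + net->ipv4.ip_rt_mtu_expires expiry. In both cases the exception
 * shadows, rather than rewrites, the FIB entry for that destination.
 */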
737 
738 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
739 			     bool kill_route)
740 {
741 	__be32 new_gw = icmp_hdr(skb)->un.gateway;
742 	__be32 old_gw = ip_hdr(skb)->saddr;
743 	struct net_device *dev = skb->dev;
744 	struct in_device *in_dev;
745 	struct fib_result res;
746 	struct neighbour *n;
747 	struct net *net;
748 
749 	switch (icmp_hdr(skb)->code & 7) {
750 	case ICMP_REDIR_NET:
751 	case ICMP_REDIR_NETTOS:
752 	case ICMP_REDIR_HOST:
753 	case ICMP_REDIR_HOSTTOS:
754 		break;
755 
756 	default:
757 		return;
758 	}
759 
760 	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
761 		return;
762 
763 	in_dev = __in_dev_get_rcu(dev);
764 	if (!in_dev)
765 		return;
766 
767 	net = dev_net(dev);
768 	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
769 	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
770 	    ipv4_is_zeronet(new_gw))
771 		goto reject_redirect;
772 
773 	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
774 		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
775 			goto reject_redirect;
776 		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
777 			goto reject_redirect;
778 	} else {
779 		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
780 			goto reject_redirect;
781 	}
782 
783 	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
784 	if (!n)
785 		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
786 	if (!IS_ERR(n)) {
787 		if (!(n->nud_state & NUD_VALID)) {
788 			neigh_event_send(n, NULL);
789 		} else {
790 			if (fib_lookup(net, fl4, &res, 0) == 0) {
791 				struct fib_nh_common *nhc;
792 
793 				fib_select_path(net, &res, fl4, skb);
794 				nhc = FIB_RES_NHC(res);
795 				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
796 						0, false,
797 						jiffies + ip_rt_gc_timeout);
798 			}
799 			if (kill_route)
800 				rt->dst.obsolete = DST_OBSOLETE_KILL;
801 			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
802 		}
803 		neigh_release(n);
804 	}
805 	return;
806 
807 reject_redirect:
808 #ifdef CONFIG_IP_ROUTE_VERBOSE
809 	if (IN_DEV_LOG_MARTIANS(in_dev)) {
810 		const struct iphdr *iph = (const struct iphdr *) skb->data;
811 		__be32 daddr = iph->daddr;
812 		__be32 saddr = iph->saddr;
813 
814 		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
815 				     "  Advised path = %pI4 -> %pI4\n",
816 				     &old_gw, dev->name, &new_gw,
817 				     &saddr, &daddr);
818 	}
819 #endif
820 	;
821 }
822 
823 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
824 {
825 	struct rtable *rt;
826 	struct flowi4 fl4;
827 	const struct iphdr *iph = (const struct iphdr *) skb->data;
828 	struct net *net = dev_net(skb->dev);
829 	int oif = skb->dev->ifindex;
830 	u8 prot = iph->protocol;
831 	u32 mark = skb->mark;
832 	__u8 tos = iph->tos;
833 
834 	rt = (struct rtable *) dst;
835 
836 	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
837 	__ip_do_redirect(rt, skb, &fl4, true);
838 }
839 
840 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
841 {
842 	struct rtable *rt = (struct rtable *)dst;
843 	struct dst_entry *ret = dst;
844 
845 	if (rt) {
846 		if (dst->obsolete > 0) {
847 			ip_rt_put(rt);
848 			ret = NULL;
849 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
850 			   rt->dst.expires) {
851 			ip_rt_put(rt);
852 			ret = NULL;
853 		}
854 	}
855 	return ret;
856 }
857 
858 /*
859  * Algorithm:
860  *	1. The first ip_rt_redirect_number redirects are sent
861  *	   with exponential backoff, then we stop sending them at all,
862  *	   assuming that the host ignores our redirects.
863  *	2. If we did not see packets requiring redirects
864  *	   during ip_rt_redirect_silence, we assume that the host
865  *	   forgot the redirected route and start sending redirects again.
866  *
867  * This algorithm is much cheaper and more intelligent than dumb load limiting
868  * in icmp.c.
869  *
870  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
871  * and "frag. need" (breaks PMTU discovery) in icmp.c.
872  */
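/* A worked example with the defaults above: ip_rt_redirect_load is HZ/50
 * (20 ms), so after the Nth redirect the next one is not sent until
 * 20 ms << N has elapsed -- 40 ms before the 2nd, doubling each time up to
 * ~5.1 s before the 9th and last one (ip_rt_redirect_number). If no packet
 * needing a redirect is seen for ip_rt_redirect_silence, (HZ/50) << 10 or
 * ~20.5 s, the counters reset and the sequence starts over.
 */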
873 
874 void ip_rt_send_redirect(struct sk_buff *skb)
875 {
876 	struct rtable *rt = skb_rtable(skb);
877 	struct in_device *in_dev;
878 	struct inet_peer *peer;
879 	struct net *net;
880 	int log_martians;
881 	int vif;
882 
883 	rcu_read_lock();
884 	in_dev = __in_dev_get_rcu(rt->dst.dev);
885 	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
886 		rcu_read_unlock();
887 		return;
888 	}
889 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
890 	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
891 	rcu_read_unlock();
892 
893 	net = dev_net(rt->dst.dev);
894 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
895 	if (!peer) {
896 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
897 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
898 		return;
899 	}
900 
901 	/* No redirected packets during ip_rt_redirect_silence;
902 	 * reset the algorithm.
903 	 */
904 	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
905 		peer->rate_tokens = 0;
906 		peer->n_redirects = 0;
907 	}
908 
909 	/* Too many ignored redirects; do not send anything;
910 	 * set peer->rate_last to the last seen redirected packet.
911 	 */
912 	if (peer->n_redirects >= ip_rt_redirect_number) {
913 		peer->rate_last = jiffies;
914 		goto out_put_peer;
915 	}
916 
917 	/* Check for load limit; set rate_last to the latest sent
918 	 * redirect.
919 	 */
920 	if (peer->n_redirects == 0 ||
921 	    time_after(jiffies,
922 		       (peer->rate_last +
923 			(ip_rt_redirect_load << peer->n_redirects)))) {
924 		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
925 
926 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
927 		peer->rate_last = jiffies;
928 		++peer->n_redirects;
929 #ifdef CONFIG_IP_ROUTE_VERBOSE
930 		if (log_martians &&
931 		    peer->n_redirects == ip_rt_redirect_number)
932 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
933 					     &ip_hdr(skb)->saddr, inet_iif(skb),
934 					     &ip_hdr(skb)->daddr, &gw);
935 #endif
936 	}
937 out_put_peer:
938 	inet_putpeer(peer);
939 }
940 
941 static int ip_error(struct sk_buff *skb)
942 {
943 	struct rtable *rt = skb_rtable(skb);
944 	struct net_device *dev = skb->dev;
945 	struct in_device *in_dev;
946 	struct inet_peer *peer;
947 	unsigned long now;
948 	struct net *net;
949 	SKB_DR(reason);
950 	bool send;
951 	int code;
952 
953 	if (netif_is_l3_master(skb->dev)) {
954 		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
955 		if (!dev)
956 			goto out;
957 	}
958 
959 	in_dev = __in_dev_get_rcu(dev);
960 
961 	/* IP on this device is disabled. */
962 	if (!in_dev)
963 		goto out;
964 
965 	net = dev_net(rt->dst.dev);
966 	if (!IN_DEV_FORWARD(in_dev)) {
967 		switch (rt->dst.error) {
968 		case EHOSTUNREACH:
969 			SKB_DR_SET(reason, IP_INADDRERRORS);
970 			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
971 			break;
972 
973 		case ENETUNREACH:
974 			SKB_DR_SET(reason, IP_INNOROUTES);
975 			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
976 			break;
977 		}
978 		goto out;
979 	}
980 
981 	switch (rt->dst.error) {
982 	case EINVAL:
983 	default:
984 		goto out;
985 	case EHOSTUNREACH:
986 		code = ICMP_HOST_UNREACH;
987 		break;
988 	case ENETUNREACH:
989 		code = ICMP_NET_UNREACH;
990 		SKB_DR_SET(reason, IP_INNOROUTES);
991 		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
992 		break;
993 	case EACCES:
994 		code = ICMP_PKT_FILTERED;
995 		break;
996 	}
997 
998 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
999 			       l3mdev_master_ifindex(skb->dev), 1);
1000 
1001 	send = true;
1002 	if (peer) {
1003 		now = jiffies;
1004 		peer->rate_tokens += now - peer->rate_last;
1005 		if (peer->rate_tokens > ip_rt_error_burst)
1006 			peer->rate_tokens = ip_rt_error_burst;
1007 		peer->rate_last = now;
1008 		if (peer->rate_tokens >= ip_rt_error_cost)
1009 			peer->rate_tokens -= ip_rt_error_cost;
1010 		else
1011 			send = false;
1012 		inet_putpeer(peer);
1013 	}
1014 	if (send)
1015 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1016 
1017 out:	kfree_skb_reason(skb, reason);
1018 	return 0;
1019 }
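/* The rate limiting above is a classic token bucket kept per source peer:
 * tokens accrue one per jiffy up to ip_rt_error_burst (5 * HZ), and each
 * ICMP destination-unreachable sent spends ip_rt_error_cost (HZ) of them.
 * Net effect with the defaults: a burst of up to five errors, then at most
 * one error per second toward a given peer.
 */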
1020 
1021 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1022 {
1023 	struct dst_entry *dst = &rt->dst;
1024 	struct net *net = dev_net(dst->dev);
1025 	struct fib_result res;
1026 	bool lock = false;
1027 	u32 old_mtu;
1028 
1029 	if (ip_mtu_locked(dst))
1030 		return;
1031 
1032 	old_mtu = ipv4_mtu(dst);
1033 	if (old_mtu < mtu)
1034 		return;
1035 
1036 	if (mtu < net->ipv4.ip_rt_min_pmtu) {
1037 		lock = true;
1038 		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
1039 	}
1040 
1041 	if (rt->rt_pmtu == mtu && !lock &&
1042 	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
1043 		return;
1044 
1045 	rcu_read_lock();
1046 	if (fib_lookup(net, fl4, &res, 0) == 0) {
1047 		struct fib_nh_common *nhc;
1048 
1049 		fib_select_path(net, &res, fl4, NULL);
1050 		nhc = FIB_RES_NHC(res);
1051 		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1052 				      jiffies + net->ipv4.ip_rt_mtu_expires);
1053 	}
1054 	rcu_read_unlock();
1055 }
1056 
1057 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1058 			      struct sk_buff *skb, u32 mtu,
1059 			      bool confirm_neigh)
1060 {
1061 	struct rtable *rt = (struct rtable *) dst;
1062 	struct flowi4 fl4;
1063 
1064 	ip_rt_build_flow_key(&fl4, sk, skb);
1065 
1066 	/* Don't make lookup fail for bridged encapsulations */
1067 	if (skb && netif_is_any_bridge_port(skb->dev))
1068 		fl4.flowi4_oif = 0;
1069 
1070 	__ip_rt_update_pmtu(rt, &fl4, mtu);
1071 }
1072 
1073 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1074 		      int oif, u8 protocol)
1075 {
1076 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1077 	struct flowi4 fl4;
1078 	struct rtable *rt;
1079 	u32 mark = IP4_REPLY_MARK(net, skb->mark);
1080 
1081 	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
1082 			 0);
1083 	rt = __ip_route_output_key(net, &fl4);
1084 	if (!IS_ERR(rt)) {
1085 		__ip_rt_update_pmtu(rt, &fl4, mtu);
1086 		ip_rt_put(rt);
1087 	}
1088 }
1089 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1090 
1091 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1092 {
1093 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1094 	struct flowi4 fl4;
1095 	struct rtable *rt;
1096 
1097 	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1098 
1099 	if (!fl4.flowi4_mark)
1100 		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1101 
1102 	rt = __ip_route_output_key(sock_net(sk), &fl4);
1103 	if (!IS_ERR(rt)) {
1104 		__ip_rt_update_pmtu(rt, &fl4, mtu);
1105 		ip_rt_put(rt);
1106 	}
1107 }
1108 
1109 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1110 {
1111 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1112 	struct flowi4 fl4;
1113 	struct rtable *rt;
1114 	struct dst_entry *odst = NULL;
1115 	bool new = false;
1116 	struct net *net = sock_net(sk);
1117 
1118 	bh_lock_sock(sk);
1119 
1120 	if (!ip_sk_accept_pmtu(sk))
1121 		goto out;
1122 
1123 	odst = sk_dst_get(sk);
1124 
1125 	if (sock_owned_by_user(sk) || !odst) {
1126 		__ipv4_sk_update_pmtu(skb, sk, mtu);
1127 		goto out;
1128 	}
1129 
1130 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1131 
1132 	rt = (struct rtable *)odst;
1133 	if (odst->obsolete && !odst->ops->check(odst, 0)) {
1134 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1135 		if (IS_ERR(rt))
1136 			goto out;
1137 
1138 		new = true;
1139 	}
1140 
1141 	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
1142 
1143 	if (!dst_check(&rt->dst, 0)) {
1144 		if (new)
1145 			dst_release(&rt->dst);
1146 
1147 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1148 		if (IS_ERR(rt))
1149 			goto out;
1150 
1151 		new = true;
1152 	}
1153 
1154 	if (new)
1155 		sk_dst_set(sk, &rt->dst);
1156 
1157 out:
1158 	bh_unlock_sock(sk);
1159 	dst_release(odst);
1160 }
1161 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1162 
1163 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1164 		   int oif, u8 protocol)
1165 {
1166 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1167 	struct flowi4 fl4;
1168 	struct rtable *rt;
1169 
1170 	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
1171 	rt = __ip_route_output_key(net, &fl4);
1172 	if (!IS_ERR(rt)) {
1173 		__ip_do_redirect(rt, skb, &fl4, false);
1174 		ip_rt_put(rt);
1175 	}
1176 }
1177 EXPORT_SYMBOL_GPL(ipv4_redirect);
1178 
1179 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1180 {
1181 	const struct iphdr *iph = (const struct iphdr *)skb->data;
1182 	struct flowi4 fl4;
1183 	struct rtable *rt;
1184 	struct net *net = sock_net(sk);
1185 
1186 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1187 	rt = __ip_route_output_key(net, &fl4);
1188 	if (!IS_ERR(rt)) {
1189 		__ip_do_redirect(rt, skb, &fl4, false);
1190 		ip_rt_put(rt);
1191 	}
1192 }
1193 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1194 
1195 INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
1196 							 u32 cookie)
1197 {
1198 	struct rtable *rt = (struct rtable *) dst;
1199 
1200 	/* All IPV4 dsts are created with ->obsolete set to the value
1201 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1202 	 * into this function always.
1203 	 *
1204 	 * When a PMTU/redirect information update invalidates a route,
1205 	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1206 	 * DST_OBSOLETE_DEAD.
1207 	 */
1208 	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1209 		return NULL;
1210 	return dst;
1211 }
1212 EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
1213 
1214 static void ipv4_send_dest_unreach(struct sk_buff *skb)
1215 {
1216 	struct ip_options opt;
1217 	int res;
1218 
1219 	/* Recompile ip options since IPCB may not be valid anymore.
1220 	 * Also check we have a reasonable ipv4 header.
1221 	 */
1222 	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1223 	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1224 		return;
1225 
1226 	memset(&opt, 0, sizeof(opt));
1227 	if (ip_hdr(skb)->ihl > 5) {
1228 		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1229 			return;
1230 		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1231 
1232 		rcu_read_lock();
1233 		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1234 		rcu_read_unlock();
1235 
1236 		if (res)
1237 			return;
1238 	}
1239 	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1240 }
1241 
1242 static void ipv4_link_failure(struct sk_buff *skb)
1243 {
1244 	struct rtable *rt;
1245 
1246 	ipv4_send_dest_unreach(skb);
1247 
1248 	rt = skb_rtable(skb);
1249 	if (rt)
1250 		dst_set_expires(&rt->dst, 0);
1251 }
1252 
1253 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1254 {
1255 	pr_debug("%s: %pI4 -> %pI4, %s\n",
1256 		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1257 		 skb->dev ? skb->dev->name : "?");
1258 	kfree_skb(skb);
1259 	WARN_ON(1);
1260 	return 0;
1261 }
1262 
1263 /*
1264  * We do not cache source address of outgoing interface,
1265  * because it is used only by IP RR, TS and SRR options,
1266  * so it is out of the fast path.
1267  *
1268  * BTW remember: "addr" is allowed to be not aligned
1269  * in IP options!
1270  */
1271 
1272 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1273 {
1274 	__be32 src;
1275 
1276 	if (rt_is_output_route(rt))
1277 		src = ip_hdr(skb)->saddr;
1278 	else {
1279 		struct fib_result res;
1280 		struct iphdr *iph = ip_hdr(skb);
1281 		struct flowi4 fl4 = {
1282 			.daddr = iph->daddr,
1283 			.saddr = iph->saddr,
1284 			.flowi4_tos = RT_TOS(iph->tos),
1285 			.flowi4_oif = rt->dst.dev->ifindex,
1286 			.flowi4_iif = skb->dev->ifindex,
1287 			.flowi4_mark = skb->mark,
1288 		};
1289 
1290 		rcu_read_lock();
1291 		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1292 			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1293 		else
1294 			src = inet_select_addr(rt->dst.dev,
1295 					       rt_nexthop(rt, iph->daddr),
1296 					       RT_SCOPE_UNIVERSE);
1297 		rcu_read_unlock();
1298 	}
1299 	memcpy(addr, &src, 4);
1300 }
1301 
1302 #ifdef CONFIG_IP_ROUTE_CLASSID
1303 static void set_class_tag(struct rtable *rt, u32 tag)
1304 {
1305 	if (!(rt->dst.tclassid & 0xFFFF))
1306 		rt->dst.tclassid |= tag & 0xFFFF;
1307 	if (!(rt->dst.tclassid & 0xFFFF0000))
1308 		rt->dst.tclassid |= tag & 0xFFFF0000;
1309 }
1310 #endif
1311 
1312 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1313 {
1314 	struct net *net = dev_net(dst->dev);
1315 	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1316 	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1317 				    net->ipv4.ip_rt_min_advmss);
1318 
1319 	return min(advmss, IPV4_MAX_PMTU - header_size);
1320 }
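/* Example with the defaults: a 1500-byte route MTU yields an advertised MSS
 * of 1500 - 40 = 1460 (20 bytes each for the IPv4 and TCP headers). A
 * pathologically small MTU is floored at ip_rt_min_advmss
 * (DEFAULT_MIN_ADVMSS above, 256, unless tuned via sysctl), and the result
 * is always capped at IPV4_MAX_PMTU - 40.
 */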
1321 
1322 INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
1323 {
1324 	return ip_dst_mtu_maybe_forward(dst, false);
1325 }
1326 EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
1327 
1328 static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1329 {
1330 	struct fnhe_hash_bucket *hash;
1331 	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1332 	u32 hval = fnhe_hashfun(daddr);
1333 
1334 	spin_lock_bh(&fnhe_lock);
1335 
1336 	hash = rcu_dereference_protected(nhc->nhc_exceptions,
1337 					 lockdep_is_held(&fnhe_lock));
1338 	hash += hval;
1339 
1340 	fnhe_p = &hash->chain;
1341 	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1342 	while (fnhe) {
1343 		if (fnhe->fnhe_daddr == daddr) {
1344 			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1345 				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1346 			/* set fnhe_daddr to 0 to ensure it won't bind with
1347 			 * new dsts in rt_bind_exception().
1348 			 */
1349 			fnhe->fnhe_daddr = 0;
1350 			fnhe_flush_routes(fnhe);
1351 			kfree_rcu(fnhe, rcu);
1352 			break;
1353 		}
1354 		fnhe_p = &fnhe->fnhe_next;
1355 		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1356 						 lockdep_is_held(&fnhe_lock));
1357 	}
1358 
1359 	spin_unlock_bh(&fnhe_lock);
1360 }
1361 
1362 static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1363 					       __be32 daddr)
1364 {
1365 	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1366 	struct fib_nh_exception *fnhe;
1367 	u32 hval;
1368 
1369 	if (!hash)
1370 		return NULL;
1371 
1372 	hval = fnhe_hashfun(daddr);
1373 
1374 	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1375 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
1376 		if (fnhe->fnhe_daddr == daddr) {
1377 			if (fnhe->fnhe_expires &&
1378 			    time_after(jiffies, fnhe->fnhe_expires)) {
1379 				ip_del_fnhe(nhc, daddr);
1380 				break;
1381 			}
1382 			return fnhe;
1383 		}
1384 	}
1385 	return NULL;
1386 }
1387 
1388 /* MTU selection:
1389  * 1. mtu on route is locked - use it
1390  * 2. mtu from nexthop exception
1391  * 3. mtu from egress device
1392  */
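/* For instance, an administrator can pin case 1 with
 * "ip route add 10.0.0.0/24 via 192.0.2.1 mtu lock 1400": the lock sets the
 * RTAX_MTU bit in the RTAX_LOCK metric checked below, so neither a nexthop
 * exception nor the device MTU is consulted. (The addresses here are
 * documentation examples, not anything assumed by the code.)
 */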
1393 
1394 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1395 {
1396 	struct fib_nh_common *nhc = res->nhc;
1397 	struct net_device *dev = nhc->nhc_dev;
1398 	struct fib_info *fi = res->fi;
1399 	u32 mtu = 0;
1400 
1401 	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
1402 	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1403 		mtu = fi->fib_mtu;
1404 
1405 	if (likely(!mtu)) {
1406 		struct fib_nh_exception *fnhe;
1407 
1408 		fnhe = find_exception(nhc, daddr);
1409 		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1410 			mtu = fnhe->fnhe_pmtu;
1411 	}
1412 
1413 	if (likely(!mtu))
1414 		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1415 
1416 	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
1417 }
1418 
1419 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1420 			      __be32 daddr, const bool do_cache)
1421 {
1422 	bool ret = false;
1423 
1424 	spin_lock_bh(&fnhe_lock);
1425 
1426 	if (daddr == fnhe->fnhe_daddr) {
1427 		struct rtable __rcu **porig;
1428 		struct rtable *orig;
1429 		int genid = fnhe_genid(dev_net(rt->dst.dev));
1430 
1431 		if (rt_is_input_route(rt))
1432 			porig = &fnhe->fnhe_rth_input;
1433 		else
1434 			porig = &fnhe->fnhe_rth_output;
1435 		orig = rcu_dereference(*porig);
1436 
1437 		if (fnhe->fnhe_genid != genid) {
1438 			fnhe->fnhe_genid = genid;
1439 			fnhe->fnhe_gw = 0;
1440 			fnhe->fnhe_pmtu = 0;
1441 			fnhe->fnhe_expires = 0;
1442 			fnhe->fnhe_mtu_locked = false;
1443 			fnhe_flush_routes(fnhe);
1444 			orig = NULL;
1445 		}
1446 		fill_route_from_fnhe(rt, fnhe);
1447 		if (!rt->rt_gw4) {
1448 			rt->rt_gw4 = daddr;
1449 			rt->rt_gw_family = AF_INET;
1450 		}
1451 
1452 		if (do_cache) {
1453 			dst_hold(&rt->dst);
1454 			rcu_assign_pointer(*porig, rt);
1455 			if (orig) {
1456 				dst_dev_put(&orig->dst);
1457 				dst_release(&orig->dst);
1458 			}
1459 			ret = true;
1460 		}
1461 
1462 		fnhe->fnhe_stamp = jiffies;
1463 	}
1464 	spin_unlock_bh(&fnhe_lock);
1465 
1466 	return ret;
1467 }
1468 
1469 static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1470 {
1471 	struct rtable *orig, *prev, **p;
1472 	bool ret = true;
1473 
1474 	if (rt_is_input_route(rt)) {
1475 		p = (struct rtable **)&nhc->nhc_rth_input;
1476 	} else {
1477 		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1478 	}
1479 	orig = *p;
1480 
1481 	/* hold dst before doing cmpxchg() to avoid race condition
1482 	 * on this dst
1483 	 */
1484 	dst_hold(&rt->dst);
1485 	prev = cmpxchg(p, orig, rt);
1486 	if (prev == orig) {
1487 		if (orig) {
1488 			rt_add_uncached_list(orig);
1489 			dst_release(&orig->dst);
1490 		}
1491 	} else {
1492 		dst_release(&rt->dst);
1493 		ret = false;
1494 	}
1495 
1496 	return ret;
1497 }
1498 
1499 struct uncached_list {
1500 	spinlock_t		lock;
1501 	struct list_head	head;
1502 	struct list_head	quarantine;
1503 };
1504 
1505 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1506 
1507 void rt_add_uncached_list(struct rtable *rt)
1508 {
1509 	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1510 
1511 	rt->rt_uncached_list = ul;
1512 
1513 	spin_lock_bh(&ul->lock);
1514 	list_add_tail(&rt->rt_uncached, &ul->head);
1515 	spin_unlock_bh(&ul->lock);
1516 }
1517 
1518 void rt_del_uncached_list(struct rtable *rt)
1519 {
1520 	if (!list_empty(&rt->rt_uncached)) {
1521 		struct uncached_list *ul = rt->rt_uncached_list;
1522 
1523 		spin_lock_bh(&ul->lock);
1524 		list_del_init(&rt->rt_uncached);
1525 		spin_unlock_bh(&ul->lock);
1526 	}
1527 }
1528 
1529 static void ipv4_dst_destroy(struct dst_entry *dst)
1530 {
1531 	struct rtable *rt = (struct rtable *)dst;
1532 
1533 	ip_dst_metrics_put(dst);
1534 	rt_del_uncached_list(rt);
1535 }
1536 
1537 void rt_flush_dev(struct net_device *dev)
1538 {
1539 	struct rtable *rt, *safe;
1540 	int cpu;
1541 
1542 	for_each_possible_cpu(cpu) {
1543 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1544 
1545 		if (list_empty(&ul->head))
1546 			continue;
1547 
1548 		spin_lock_bh(&ul->lock);
1549 		list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
1550 			if (rt->dst.dev != dev)
1551 				continue;
1552 			rt->dst.dev = blackhole_netdev;
1553 			dev_replace_track(dev, blackhole_netdev,
1554 					  &rt->dst.dev_tracker,
1555 					  GFP_ATOMIC);
1556 			list_move(&rt->rt_uncached, &ul->quarantine);
1557 		}
1558 		spin_unlock_bh(&ul->lock);
1559 	}
1560 }
1561 
1562 static bool rt_cache_valid(const struct rtable *rt)
1563 {
1564 	return	rt &&
1565 		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1566 		!rt_is_expired(rt);
1567 }
1568 
1569 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1570 			   const struct fib_result *res,
1571 			   struct fib_nh_exception *fnhe,
1572 			   struct fib_info *fi, u16 type, u32 itag,
1573 			   const bool do_cache)
1574 {
1575 	bool cached = false;
1576 
1577 	if (fi) {
1578 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1579 
1580 		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1581 			rt->rt_uses_gateway = 1;
1582 			rt->rt_gw_family = nhc->nhc_gw_family;
1583 			/* only INET and INET6 are supported */
1584 			if (likely(nhc->nhc_gw_family == AF_INET))
1585 				rt->rt_gw4 = nhc->nhc_gw.ipv4;
1586 			else
1587 				rt->rt_gw6 = nhc->nhc_gw.ipv6;
1588 		}
1589 
1590 		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1591 
1592 #ifdef CONFIG_IP_ROUTE_CLASSID
1593 		if (nhc->nhc_family == AF_INET) {
1594 			struct fib_nh *nh;
1595 
1596 			nh = container_of(nhc, struct fib_nh, nh_common);
1597 			rt->dst.tclassid = nh->nh_tclassid;
1598 		}
1599 #endif
1600 		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1601 		if (unlikely(fnhe))
1602 			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1603 		else if (do_cache)
1604 			cached = rt_cache_route(nhc, rt);
1605 		if (unlikely(!cached)) {
1606 			/* Routes we intend to cache in nexthop exception or
1607 			 * FIB nexthop have the DST_NOCACHE bit clear.
1608 			 * However, if we are unsuccessful at storing this
1609 			 * route into the cache we really need to set it.
1610 			 */
1611 			if (!rt->rt_gw4) {
1612 				rt->rt_gw_family = AF_INET;
1613 				rt->rt_gw4 = daddr;
1614 			}
1615 			rt_add_uncached_list(rt);
1616 		}
1617 	} else
1618 		rt_add_uncached_list(rt);
1619 
1620 #ifdef CONFIG_IP_ROUTE_CLASSID
1621 #ifdef CONFIG_IP_MULTIPLE_TABLES
1622 	set_class_tag(rt, res->tclassid);
1623 #endif
1624 	set_class_tag(rt, itag);
1625 #endif
1626 }
1627 
1628 struct rtable *rt_dst_alloc(struct net_device *dev,
1629 			    unsigned int flags, u16 type,
1630 			    bool nopolicy, bool noxfrm)
1631 {
1632 	struct rtable *rt;
1633 
1634 	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1635 		       (nopolicy ? DST_NOPOLICY : 0) |
1636 		       (noxfrm ? DST_NOXFRM : 0));
1637 
1638 	if (rt) {
1639 		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1640 		rt->rt_flags = flags;
1641 		rt->rt_type = type;
1642 		rt->rt_is_input = 0;
1643 		rt->rt_iif = 0;
1644 		rt->rt_pmtu = 0;
1645 		rt->rt_mtu_locked = 0;
1646 		rt->rt_uses_gateway = 0;
1647 		rt->rt_gw_family = 0;
1648 		rt->rt_gw4 = 0;
1649 		INIT_LIST_HEAD(&rt->rt_uncached);
1650 
1651 		rt->dst.output = ip_output;
1652 		if (flags & RTCF_LOCAL)
1653 			rt->dst.input = ip_local_deliver;
1654 	}
1655 
1656 	return rt;
1657 }
1658 EXPORT_SYMBOL(rt_dst_alloc);
1659 
1660 struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1661 {
1662 	struct rtable *new_rt;
1663 
1664 	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1665 			   rt->dst.flags);
1666 
1667 	if (new_rt) {
1668 		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1669 		new_rt->rt_flags = rt->rt_flags;
1670 		new_rt->rt_type = rt->rt_type;
1671 		new_rt->rt_is_input = rt->rt_is_input;
1672 		new_rt->rt_iif = rt->rt_iif;
1673 		new_rt->rt_pmtu = rt->rt_pmtu;
1674 		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1675 		new_rt->rt_gw_family = rt->rt_gw_family;
1676 		if (rt->rt_gw_family == AF_INET)
1677 			new_rt->rt_gw4 = rt->rt_gw4;
1678 		else if (rt->rt_gw_family == AF_INET6)
1679 			new_rt->rt_gw6 = rt->rt_gw6;
1680 		INIT_LIST_HEAD(&new_rt->rt_uncached);
1681 
1682 		new_rt->dst.input = rt->dst.input;
1683 		new_rt->dst.output = rt->dst.output;
1684 		new_rt->dst.error = rt->dst.error;
1685 		new_rt->dst.lastuse = jiffies;
1686 		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1687 	}
1688 	return new_rt;
1689 }
1690 EXPORT_SYMBOL(rt_dst_clone);
1691 
1692 /* called in rcu_read_lock() section */
1693 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1694 			  u8 tos, struct net_device *dev,
1695 			  struct in_device *in_dev, u32 *itag)
1696 {
1697 	int err;
1698 
1699 	/* Primary sanity checks. */
1700 	if (!in_dev)
1701 		return -EINVAL;
1702 
1703 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1704 	    skb->protocol != htons(ETH_P_IP))
1705 		return -EINVAL;
1706 
1707 	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1708 		return -EINVAL;
1709 
1710 	if (ipv4_is_zeronet(saddr)) {
1711 		if (!ipv4_is_local_multicast(daddr) &&
1712 		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
1713 			return -EINVAL;
1714 	} else {
1715 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1716 					  in_dev, itag);
1717 		if (err < 0)
1718 			return err;
1719 	}
1720 	return 0;
1721 }
1722 
1723 /* called in rcu_read_lock() section */
1724 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1725 			     u8 tos, struct net_device *dev, int our)
1726 {
1727 	struct in_device *in_dev = __in_dev_get_rcu(dev);
1728 	unsigned int flags = RTCF_MULTICAST;
1729 	struct rtable *rth;
1730 	bool no_policy;
1731 	u32 itag = 0;
1732 	int err;
1733 
1734 	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1735 	if (err)
1736 		return err;
1737 
1738 	if (our)
1739 		flags |= RTCF_LOCAL;
1740 
1741 	no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
1742 	if (no_policy)
1743 		IPCB(skb)->flags |= IPSKB_NOPOLICY;
1744 
1745 	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1746 			   no_policy, false);
1747 	if (!rth)
1748 		return -ENOBUFS;
1749 
1750 #ifdef CONFIG_IP_ROUTE_CLASSID
1751 	rth->dst.tclassid = itag;
1752 #endif
1753 	rth->dst.output = ip_rt_bug;
1754 	rth->rt_is_input = 1;
1755 
1756 #ifdef CONFIG_IP_MROUTE
1757 	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1758 		rth->dst.input = ip_mr_input;
1759 #endif
1760 	RT_CACHE_STAT_INC(in_slow_mc);
1761 
1762 	skb_dst_drop(skb);
1763 	skb_dst_set(skb, &rth->dst);
1764 	return 0;
1765 }
1766 
1767 
1768 static void ip_handle_martian_source(struct net_device *dev,
1769 				     struct in_device *in_dev,
1770 				     struct sk_buff *skb,
1771 				     __be32 daddr,
1772 				     __be32 saddr)
1773 {
1774 	RT_CACHE_STAT_INC(in_martian_src);
1775 #ifdef CONFIG_IP_ROUTE_VERBOSE
1776 	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1777 		/*
1778 		 *	RFC1812 recommendation, if source is martian,
1779 		 *	the only hint is MAC header.
1780 		 */
1781 		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1782 			&daddr, &saddr, dev->name);
1783 		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1784 			print_hex_dump(KERN_WARNING, "ll header: ",
1785 				       DUMP_PREFIX_OFFSET, 16, 1,
1786 				       skb_mac_header(skb),
1787 				       dev->hard_header_len, false);
1788 		}
1789 	}
1790 #endif
1791 }
1792 
1793 /* called in rcu_read_lock() section */
1794 static int __mkroute_input(struct sk_buff *skb,
1795 			   const struct fib_result *res,
1796 			   struct in_device *in_dev,
1797 			   __be32 daddr, __be32 saddr, u32 tos)
1798 {
1799 	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1800 	struct net_device *dev = nhc->nhc_dev;
1801 	struct fib_nh_exception *fnhe;
1802 	struct rtable *rth;
1803 	int err;
1804 	struct in_device *out_dev;
1805 	bool do_cache, no_policy;
1806 	u32 itag = 0;
1807 
1808 	/* get a working reference to the output device */
1809 	out_dev = __in_dev_get_rcu(dev);
1810 	if (!out_dev) {
1811 		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1812 		return -EINVAL;
1813 	}
1814 
1815 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1816 				  in_dev->dev, in_dev, &itag);
1817 	if (err < 0) {
1818 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1819 					 saddr);
1820 
1821 		goto cleanup;
1822 	}
1823 
1824 	do_cache = res->fi && !itag;
1825 	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1826 	    skb->protocol == htons(ETH_P_IP)) {
1827 		__be32 gw;
1828 
1829 		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1830 		if (IN_DEV_SHARED_MEDIA(out_dev) ||
1831 		    inet_addr_onlink(out_dev, saddr, gw))
1832 			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1833 	}
1834 
1835 	if (skb->protocol != htons(ETH_P_IP)) {
1836 		/* Not IP (i.e. ARP). Do not create a route if it is
1837 		 * invalid for proxy arp. DNAT routes are always valid.
1838 		 *
1839 		 * The proxy arp feature has been extended to allow ARP
1840 		 * replies back out of the same interface, to support
1841 		 * Private VLAN switch technologies. See arp.c.
1842 		 */
1843 		if (out_dev == in_dev &&
1844 		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1845 			err = -EINVAL;
1846 			goto cleanup;
1847 		}
1848 	}
1849 
1850 	no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
1851 	if (no_policy)
1852 		IPCB(skb)->flags |= IPSKB_NOPOLICY;
1853 
1854 	fnhe = find_exception(nhc, daddr);
1855 	if (do_cache) {
1856 		if (fnhe)
1857 			rth = rcu_dereference(fnhe->fnhe_rth_input);
1858 		else
1859 			rth = rcu_dereference(nhc->nhc_rth_input);
1860 		if (rt_cache_valid(rth)) {
1861 			skb_dst_set_noref(skb, &rth->dst);
1862 			goto out;
1863 		}
1864 	}
1865 
1866 	rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
1867 			   IN_DEV_ORCONF(out_dev, NOXFRM));
1868 	if (!rth) {
1869 		err = -ENOBUFS;
1870 		goto cleanup;
1871 	}
1872 
1873 	rth->rt_is_input = 1;
1874 	RT_CACHE_STAT_INC(in_slow_tot);
1875 
1876 	rth->dst.input = ip_forward;
1877 
1878 	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1879 		       do_cache);
1880 	lwtunnel_set_redirect(&rth->dst);
1881 	skb_dst_set(skb, &rth->dst);
1882 out:
1883 	err = 0;
1884  cleanup:
1885 	return err;
1886 }
1887 
1888 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1889 /* To make ICMP packets follow the right flow, the multipath hash is
1890  * calculated from the inner IP addresses.
1891  */
1892 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1893 				 struct flow_keys *hash_keys)
1894 {
1895 	const struct iphdr *outer_iph = ip_hdr(skb);
1896 	const struct iphdr *key_iph = outer_iph;
1897 	const struct iphdr *inner_iph;
1898 	const struct icmphdr *icmph;
1899 	struct iphdr _inner_iph;
1900 	struct icmphdr _icmph;
1901 
1902 	if (likely(outer_iph->protocol != IPPROTO_ICMP))
1903 		goto out;
1904 
1905 	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1906 		goto out;
1907 
1908 	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1909 				   &_icmph);
1910 	if (!icmph)
1911 		goto out;
1912 
1913 	if (!icmp_is_err(icmph->type))
1914 		goto out;
1915 
1916 	inner_iph = skb_header_pointer(skb,
1917 				       outer_iph->ihl * 4 + sizeof(_icmph),
1918 				       sizeof(_inner_iph), &_inner_iph);
1919 	if (!inner_iph)
1920 		goto out;
1921 
1922 	key_iph = inner_iph;
1923 out:
1924 	hash_keys->addrs.v4addrs.src = key_iph->saddr;
1925 	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1926 }
1927 
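/* Compute the outer-header contribution to the custom multipath hash.
 * Only fields enabled in net.ipv4.fib_multipath_hash_fields are used;
 * if no outer field is configured, 0 is returned and *p_has_inner is
 * left as set by the caller. Otherwise *p_has_inner reports whether
 * the dissector saw an encapsulation header.
 */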
1928 static u32 fib_multipath_custom_hash_outer(const struct net *net,
1929 					   const struct sk_buff *skb,
1930 					   bool *p_has_inner)
1931 {
1932 	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
1933 	struct flow_keys keys, hash_keys;
1934 
1935 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
1936 		return 0;
1937 
1938 	memset(&hash_keys, 0, sizeof(hash_keys));
1939 	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
1940 
1941 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1942 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
1943 		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1944 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
1945 		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1946 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
1947 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1948 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
1949 		hash_keys.ports.src = keys.ports.src;
1950 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
1951 		hash_keys.ports.dst = keys.ports.dst;
1952 
1953 	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
1954 	return flow_hash_from_keys(&hash_keys);
1955 }
1956 
1957 static u32 fib_multipath_custom_hash_inner(const struct net *net,
1958 					   const struct sk_buff *skb,
1959 					   bool has_inner)
1960 {
1961 	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
1962 	struct flow_keys keys, hash_keys;
1963 
1964 	/* We assume the packet carries an encapsulation, but if none was
1965 	 * encountered during dissection of the outer flow, then there is no
1966 	 * point in calling the flow dissector again.
1967 	 */
1968 	if (!has_inner)
1969 		return 0;
1970 
1971 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
1972 		return 0;
1973 
1974 	memset(&hash_keys, 0, sizeof(hash_keys));
1975 	skb_flow_dissect_flow_keys(skb, &keys, 0);
1976 
1977 	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
1978 		return 0;
1979 
1980 	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1981 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1982 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1983 			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1984 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1985 			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1986 	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1987 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1988 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
1989 			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
1990 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
1991 			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
1992 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
1993 			hash_keys.tags.flow_label = keys.tags.flow_label;
1994 	}
1995 
1996 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
1997 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
1998 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
1999 		hash_keys.ports.src = keys.ports.src;
2000 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2001 		hash_keys.ports.dst = keys.ports.dst;
2002 
2003 	return flow_hash_from_keys(&hash_keys);
2004 }
2005 
2006 static u32 fib_multipath_custom_hash_skb(const struct net *net,
2007 					 const struct sk_buff *skb)
2008 {
2009 	u32 mhash, mhash_inner;
2010 	bool has_inner = true;
2011 
2012 	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
2013 	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);
2014 
2015 	return jhash_2words(mhash, mhash_inner, 0);
2016 }
2017 
2018 static u32 fib_multipath_custom_hash_fl4(const struct net *net,
2019 					 const struct flowi4 *fl4)
2020 {
2021 	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
2022 	struct flow_keys hash_keys;
2023 
2024 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2025 		return 0;
2026 
2027 	memset(&hash_keys, 0, sizeof(hash_keys));
2028 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2029 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2030 		hash_keys.addrs.v4addrs.src = fl4->saddr;
2031 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2032 		hash_keys.addrs.v4addrs.dst = fl4->daddr;
2033 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2034 		hash_keys.basic.ip_proto = fl4->flowi4_proto;
2035 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2036 		hash_keys.ports.src = fl4->fl4_sport;
2037 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2038 		hash_keys.ports.dst = fl4->fl4_dport;
2039 
2040 	return flow_hash_from_keys(&hash_keys);
2041 }
2042 
2043 /* if skb is set it will be used and fl4 can be NULL */
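/* Hash policy selected by net.ipv4.fib_multipath_hash_policy:
 *   0 - L3 (source and destination addresses only)
 *   1 - L4 (five-tuple)
 *   2 - L3 of the inner header for encapsulated packets, falling back
 *       to the outer L3 otherwise
 *   3 - custom field set from net.ipv4.fib_multipath_hash_fields
 * For example, from userspace (standard sysctl knobs, see
 * Documentation/networking/ip-sysctl.rst):
 *   sysctl -w net.ipv4.fib_multipath_hash_policy=1
 */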
2044 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
2045 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2046 {
2047 	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
2048 	struct flow_keys hash_keys;
2049 	u32 mhash = 0;
2050 
2051 	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
2052 	case 0:
2053 		memset(&hash_keys, 0, sizeof(hash_keys));
2054 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2055 		if (skb) {
2056 			ip_multipath_l3_keys(skb, &hash_keys);
2057 		} else {
2058 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2059 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2060 		}
2061 		mhash = flow_hash_from_keys(&hash_keys);
2062 		break;
2063 	case 1:
2064 		/* skb is currently provided only when forwarding */
2065 		if (skb) {
2066 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2067 			struct flow_keys keys;
2068 
2069 			/* short-circuit if we already have L4 hash present */
2070 			if (skb->l4_hash)
2071 				return skb_get_hash_raw(skb) >> 1;
2072 
2073 			memset(&hash_keys, 0, sizeof(hash_keys));
2074 
2075 			if (!flkeys) {
2076 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2077 				flkeys = &keys;
2078 			}
2079 
2080 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2081 			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2082 			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2083 			hash_keys.ports.src = flkeys->ports.src;
2084 			hash_keys.ports.dst = flkeys->ports.dst;
2085 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2086 		} else {
2087 			memset(&hash_keys, 0, sizeof(hash_keys));
2088 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2089 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2090 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2091 			hash_keys.ports.src = fl4->fl4_sport;
2092 			hash_keys.ports.dst = fl4->fl4_dport;
2093 			hash_keys.basic.ip_proto = fl4->flowi4_proto;
2094 		}
2095 		mhash = flow_hash_from_keys(&hash_keys);
2096 		break;
2097 	case 2:
2098 		memset(&hash_keys, 0, sizeof(hash_keys));
2099 		/* skb is currently provided only when forwarding */
2100 		if (skb) {
2101 			struct flow_keys keys;
2102 
2103 			skb_flow_dissect_flow_keys(skb, &keys, 0);
2104 			/* Inner can be v4 or v6 */
2105 			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2106 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2107 				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2108 				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2109 			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2110 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2111 				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2112 				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2113 				hash_keys.tags.flow_label = keys.tags.flow_label;
2114 				hash_keys.basic.ip_proto = keys.basic.ip_proto;
2115 			} else {
2116 				/* Same as case 0 */
2117 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2118 				ip_multipath_l3_keys(skb, &hash_keys);
2119 			}
2120 		} else {
2121 			/* Same as case 0 */
2122 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2123 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2124 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2125 		}
2126 		mhash = flow_hash_from_keys(&hash_keys);
2127 		break;
2128 	case 3:
2129 		if (skb)
2130 			mhash = fib_multipath_custom_hash_skb(net, skb);
2131 		else
2132 			mhash = fib_multipath_custom_hash_fl4(net, fl4);
2133 		break;
2134 	}
2135 
2136 	if (multipath_hash)
2137 		mhash = jhash_2words(mhash, multipath_hash, 0);
2138 
2139 	return mhash >> 1;
2140 }
2141 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
2142 
2143 static int ip_mkroute_input(struct sk_buff *skb,
2144 			    struct fib_result *res,
2145 			    struct in_device *in_dev,
2146 			    __be32 daddr, __be32 saddr, u32 tos,
2147 			    struct flow_keys *hkeys)
2148 {
2149 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2150 	if (res->fi && fib_info_num_path(res->fi) > 1) {
2151 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2152 
2153 		fib_select_multipath(res, h);
2154 	}
2155 #endif
2156 
2157 	/* create a routing cache entry */
2158 	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
2159 }
2160 
2161 /* Implements the same saddr-related checks as ip_route_input_slow(),
2162  * assuming daddr is valid and the destination is not a local broadcast
2163  * address. Uses the provided hint instead of performing a route lookup.
2164  */
2165 int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2166 		      u8 tos, struct net_device *dev,
2167 		      const struct sk_buff *hint)
2168 {
2169 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2170 	struct rtable *rt = skb_rtable(hint);
2171 	struct net *net = dev_net(dev);
2172 	int err = -EINVAL;
2173 	u32 tag = 0;
2174 
2175 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2176 		goto martian_source;
2177 
2178 	if (ipv4_is_zeronet(saddr))
2179 		goto martian_source;
2180 
2181 	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2182 		goto martian_source;
2183 
2184 	if (rt->rt_type != RTN_LOCAL)
2185 		goto skip_validate_source;
2186 
2187 	tos &= IPTOS_RT_MASK;
2188 	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
2189 	if (err < 0)
2190 		goto martian_source;
2191 
2192 skip_validate_source:
2193 	skb_dst_copy(skb, hint);
2194 	return 0;
2195 
2196 martian_source:
2197 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2198 	return err;
2199 }
2200 
2201 /* get device for dst_alloc with local routes */
2202 static struct net_device *ip_rt_get_dev(struct net *net,
2203 					const struct fib_result *res)
2204 {
2205 	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
2206 	struct net_device *dev = NULL;
2207 
2208 	if (nhc)
2209 		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
2210 
2211 	return dev ? : net->loopback_dev;
2212 }
2213 
2214 /*
2215  *	NOTE. We drop all packets that have a local source
2216  *	address, because every properly looped-back packet
2217  *	must already have the correct destination attached by the
2218  *	output routine. Changes in the enforced policies must also
2219  *	be applied to ip_route_use_hint().
2220  *
2221  *	This approach solves two big problems:
2222  *	1. Non-simplex devices are handled properly.
2223  *	2. IP spoofing attempts are filtered with 100% guarantee.
2224  *	called with rcu_read_lock()
2225  */
2226 
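/* Slow-path input route resolution: weed out martian addresses, do the
 * FIB lookup, then dispatch to the broadcast, local or forwarding cases
 * below, caching the result on the nexthop where permitted.
 */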
2227 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2228 			       u8 tos, struct net_device *dev,
2229 			       struct fib_result *res)
2230 {
2231 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2232 	struct flow_keys *flkeys = NULL, _flkeys;
2233 	struct net    *net = dev_net(dev);
2234 	struct ip_tunnel_info *tun_info;
2235 	int		err = -EINVAL;
2236 	unsigned int	flags = 0;
2237 	u32		itag = 0;
2238 	struct rtable	*rth;
2239 	struct flowi4	fl4;
2240 	bool do_cache = true;
2241 	bool no_policy;
2242 
2243 	/* IP on this device is disabled. */
2244 
2245 	if (!in_dev)
2246 		goto out;
2247 
2248 	/* Check for the most weird martians, which cannot be detected
2249 	 * by fib_lookup.
2250 	 */
2251 
2252 	tun_info = skb_tunnel_info(skb);
2253 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2254 		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2255 	else
2256 		fl4.flowi4_tun_key.tun_id = 0;
2257 	skb_dst_drop(skb);
2258 
2259 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2260 		goto martian_source;
2261 
2262 	res->fi = NULL;
2263 	res->table = NULL;
2264 	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2265 		goto brd_input;
2266 
2267 	/* Accept zero addresses only for limited broadcast;
2268 	 * I do not even know whether to fix it or not. Waiting for complaints :-)
2269 	 */
2270 	if (ipv4_is_zeronet(saddr))
2271 		goto martian_source;
2272 
2273 	if (ipv4_is_zeronet(daddr))
2274 		goto martian_destination;
2275 
2276 	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
2277 	 * more than once when daddr and/or saddr is a loopback address.
2278 	 */
2279 	if (ipv4_is_loopback(daddr)) {
2280 		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2281 			goto martian_destination;
2282 	} else if (ipv4_is_loopback(saddr)) {
2283 		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2284 			goto martian_source;
2285 	}
2286 
2287 	/*
2288 	 *	Now we are ready to route packet.
2289 	 */
2290 	fl4.flowi4_l3mdev = 0;
2291 	fl4.flowi4_oif = 0;
2292 	fl4.flowi4_iif = dev->ifindex;
2293 	fl4.flowi4_mark = skb->mark;
2294 	fl4.flowi4_tos = tos;
2295 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2296 	fl4.flowi4_flags = 0;
2297 	fl4.daddr = daddr;
2298 	fl4.saddr = saddr;
2299 	fl4.flowi4_uid = sock_net_uid(net, NULL);
2300 	fl4.flowi4_multipath_hash = 0;
2301 
2302 	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2303 		flkeys = &_flkeys;
2304 	} else {
2305 		fl4.flowi4_proto = 0;
2306 		fl4.fl4_sport = 0;
2307 		fl4.fl4_dport = 0;
2308 	}
2309 
2310 	err = fib_lookup(net, &fl4, res, 0);
2311 	if (err != 0) {
2312 		if (!IN_DEV_FORWARD(in_dev))
2313 			err = -EHOSTUNREACH;
2314 		goto no_route;
2315 	}
2316 
2317 	if (res->type == RTN_BROADCAST) {
2318 		if (IN_DEV_BFORWARD(in_dev))
2319 			goto make_route;
2320 		/* do not cache if bc_forwarding is enabled */
2321 		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2322 			do_cache = false;
2323 		goto brd_input;
2324 	}
2325 
2326 	if (res->type == RTN_LOCAL) {
2327 		err = fib_validate_source(skb, saddr, daddr, tos,
2328 					  0, dev, in_dev, &itag);
2329 		if (err < 0)
2330 			goto martian_source;
2331 		goto local_input;
2332 	}
2333 
2334 	if (!IN_DEV_FORWARD(in_dev)) {
2335 		err = -EHOSTUNREACH;
2336 		goto no_route;
2337 	}
2338 	if (res->type != RTN_UNICAST)
2339 		goto martian_destination;
2340 
2341 make_route:
2342 	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2343 out:	return err;
2344 
2345 brd_input:
2346 	if (skb->protocol != htons(ETH_P_IP))
2347 		goto e_inval;
2348 
2349 	if (!ipv4_is_zeronet(saddr)) {
2350 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2351 					  in_dev, &itag);
2352 		if (err < 0)
2353 			goto martian_source;
2354 	}
2355 	flags |= RTCF_BROADCAST;
2356 	res->type = RTN_BROADCAST;
2357 	RT_CACHE_STAT_INC(in_brd);
2358 
2359 local_input:
2360 	no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
2361 	if (no_policy)
2362 		IPCB(skb)->flags |= IPSKB_NOPOLICY;
2363 
2364 	do_cache &= res->fi && !itag;
2365 	if (do_cache) {
2366 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2367 
2368 		rth = rcu_dereference(nhc->nhc_rth_input);
2369 		if (rt_cache_valid(rth)) {
2370 			skb_dst_set_noref(skb, &rth->dst);
2371 			err = 0;
2372 			goto out;
2373 		}
2374 	}
2375 
2376 	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2377 			   flags | RTCF_LOCAL, res->type,
2378 			   no_policy, false);
2379 	if (!rth)
2380 		goto e_nobufs;
2381 
2382 	rth->dst.output = ip_rt_bug;
2383 #ifdef CONFIG_IP_ROUTE_CLASSID
2384 	rth->dst.tclassid = itag;
2385 #endif
2386 	rth->rt_is_input = 1;
2387 
2388 	RT_CACHE_STAT_INC(in_slow_tot);
2389 	if (res->type == RTN_UNREACHABLE) {
2390 		rth->dst.input = ip_error;
2391 		rth->dst.error = -err;
2392 		rth->rt_flags &= ~RTCF_LOCAL;
2393 	}
2394 
2395 	if (do_cache) {
2396 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2397 
2398 		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2399 		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2400 			WARN_ON(rth->dst.input == lwtunnel_input);
2401 			rth->dst.lwtstate->orig_input = rth->dst.input;
2402 			rth->dst.input = lwtunnel_input;
2403 		}
2404 
2405 		if (unlikely(!rt_cache_route(nhc, rth)))
2406 			rt_add_uncached_list(rth);
2407 	}
2408 	skb_dst_set(skb, &rth->dst);
2409 	err = 0;
2410 	goto out;
2411 
2412 no_route:
2413 	RT_CACHE_STAT_INC(in_no_route);
2414 	res->type = RTN_UNREACHABLE;
2415 	res->fi = NULL;
2416 	res->table = NULL;
2417 	goto local_input;
2418 
2419 	/*
2420 	 *	Do not cache martian addresses: they should be logged (RFC1812)
2421 	 */
2422 martian_destination:
2423 	RT_CACHE_STAT_INC(in_martian_dst);
2424 #ifdef CONFIG_IP_ROUTE_VERBOSE
2425 	if (IN_DEV_LOG_MARTIANS(in_dev))
2426 		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2427 				     &daddr, &saddr, dev->name);
2428 #endif
2429 
2430 e_inval:
2431 	err = -EINVAL;
2432 	goto out;
2433 
2434 e_nobufs:
2435 	err = -ENOBUFS;
2436 	goto out;
2437 
2438 martian_source:
2439 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2440 	goto out;
2441 }
2442 
2443 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2444 			 u8 tos, struct net_device *dev)
2445 {
2446 	struct fib_result res;
2447 	int err;
2448 
2449 	tos &= IPTOS_RT_MASK;
2450 	rcu_read_lock();
2451 	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2452 	rcu_read_unlock();
2453 
2454 	return err;
2455 }
2456 EXPORT_SYMBOL(ip_route_input_noref);
2457 
2458 /* called with rcu_read_lock held */
2459 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2460 		       u8 tos, struct net_device *dev, struct fib_result *res)
2461 {
2462 	/* Multicast recognition logic is moved from the route cache to here.
2463 	 * The problem was that too many Ethernet cards have broken/missing
2464 	 * hardware multicast filters :-( As a result, a host on a multicast
2465 	 * network acquires a lot of useless route cache entries, e.g. for
2466 	 * SDR messages from all over the world. Now we try to get rid of
2467 	 * them. Really, provided the software IP multicast filter is
2468 	 * organized reasonably (at least, hashed), it does not result in
2469 	 * a slowdown compared with route cache reject entries.
2470 	 * Note that multicast routers are not affected, because a
2471 	 * route cache entry is created eventually.
2472 	 */
2473 	if (ipv4_is_multicast(daddr)) {
2474 		struct in_device *in_dev = __in_dev_get_rcu(dev);
2475 		int our = 0;
2476 		int err = -EINVAL;
2477 
2478 		if (!in_dev)
2479 			return err;
2480 		our = ip_check_mc_rcu(in_dev, daddr, saddr,
2481 				      ip_hdr(skb)->protocol);
2482 
2483 		/* check l3 master if no match yet */
2484 		if (!our && netif_is_l3_slave(dev)) {
2485 			struct in_device *l3_in_dev;
2486 
2487 			l3_in_dev = __in_dev_get_rcu(skb->dev);
2488 			if (l3_in_dev)
2489 				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2490 						      ip_hdr(skb)->protocol);
2491 		}
2492 
2493 		if (our
2494 #ifdef CONFIG_IP_MROUTE
2495 			||
2496 		    (!ipv4_is_local_multicast(daddr) &&
2497 		     IN_DEV_MFORWARD(in_dev))
2498 #endif
2499 		   ) {
2500 			err = ip_route_input_mc(skb, daddr, saddr,
2501 						tos, dev, our);
2502 		}
2503 		return err;
2504 	}
2505 
2506 	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2507 }
2508 
2509 /* called with rcu_read_lock() */
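/* Build an output rtable for the given fib_result, or return a valid
 * cached one: the destination is classified as broadcast, multicast or
 * unicast, caching is disabled where it would misbehave (e.g. uncached
 * multicast, RTN_LOCAL with an explicit oif), and a fresh dst is
 * allocated otherwise.
 */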
2510 static struct rtable *__mkroute_output(const struct fib_result *res,
2511 				       const struct flowi4 *fl4, int orig_oif,
2512 				       struct net_device *dev_out,
2513 				       unsigned int flags)
2514 {
2515 	struct fib_info *fi = res->fi;
2516 	struct fib_nh_exception *fnhe;
2517 	struct in_device *in_dev;
2518 	u16 type = res->type;
2519 	struct rtable *rth;
2520 	bool do_cache;
2521 
2522 	in_dev = __in_dev_get_rcu(dev_out);
2523 	if (!in_dev)
2524 		return ERR_PTR(-EINVAL);
2525 
2526 	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2527 		if (ipv4_is_loopback(fl4->saddr) &&
2528 		    !(dev_out->flags & IFF_LOOPBACK) &&
2529 		    !netif_is_l3_master(dev_out))
2530 			return ERR_PTR(-EINVAL);
2531 
2532 	if (ipv4_is_lbcast(fl4->daddr))
2533 		type = RTN_BROADCAST;
2534 	else if (ipv4_is_multicast(fl4->daddr))
2535 		type = RTN_MULTICAST;
2536 	else if (ipv4_is_zeronet(fl4->daddr))
2537 		return ERR_PTR(-EINVAL);
2538 
2539 	if (dev_out->flags & IFF_LOOPBACK)
2540 		flags |= RTCF_LOCAL;
2541 
2542 	do_cache = true;
2543 	if (type == RTN_BROADCAST) {
2544 		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2545 		fi = NULL;
2546 	} else if (type == RTN_MULTICAST) {
2547 		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2548 		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2549 				     fl4->flowi4_proto))
2550 			flags &= ~RTCF_LOCAL;
2551 		else
2552 			do_cache = false;
2553 		/* If a multicast route does not exist, use
2554 		 * the default one, but do not use a gateway in this case.
2555 		 * Yes, it is a hack.
2556 		 */
2557 		if (fi && res->prefixlen < 4)
2558 			fi = NULL;
2559 	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2560 		   (orig_oif != dev_out->ifindex)) {
2561 		/* For local routes that require a particular output interface
2562 		 * we do not want to cache the result.  Caching the result
2563 		 * causes incorrect behaviour when there are multiple source
2564 		 * addresses on the interface, the end result being that if the
2565 		 * intended recipient is waiting on that interface for the
2566 		 * packet he won't receive it because it will be delivered on
2567 		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2568 		 * be set to the loopback interface as well.
2569 		 */
2570 		do_cache = false;
2571 	}
2572 
2573 	fnhe = NULL;
2574 	do_cache &= fi != NULL;
2575 	if (fi) {
2576 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2577 		struct rtable __rcu **prth;
2578 
2579 		fnhe = find_exception(nhc, fl4->daddr);
2580 		if (!do_cache)
2581 			goto add;
2582 		if (fnhe) {
2583 			prth = &fnhe->fnhe_rth_output;
2584 		} else {
2585 			if (unlikely(fl4->flowi4_flags &
2586 				     FLOWI_FLAG_KNOWN_NH &&
2587 				     !(nhc->nhc_gw_family &&
2588 				       nhc->nhc_scope == RT_SCOPE_LINK))) {
2589 				do_cache = false;
2590 				goto add;
2591 			}
2592 			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2593 		}
2594 		rth = rcu_dereference(*prth);
2595 		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2596 			return rth;
2597 	}
2598 
2599 add:
2600 	rth = rt_dst_alloc(dev_out, flags, type,
2601 			   IN_DEV_ORCONF(in_dev, NOPOLICY),
2602 			   IN_DEV_ORCONF(in_dev, NOXFRM));
2603 	if (!rth)
2604 		return ERR_PTR(-ENOBUFS);
2605 
2606 	rth->rt_iif = orig_oif;
2607 
2608 	RT_CACHE_STAT_INC(out_slow_tot);
2609 
2610 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2611 		if (flags & RTCF_LOCAL &&
2612 		    !(dev_out->flags & IFF_LOOPBACK)) {
2613 			rth->dst.output = ip_mc_output;
2614 			RT_CACHE_STAT_INC(out_slow_mc);
2615 		}
2616 #ifdef CONFIG_IP_MROUTE
2617 		if (type == RTN_MULTICAST) {
2618 			if (IN_DEV_MFORWARD(in_dev) &&
2619 			    !ipv4_is_local_multicast(fl4->daddr)) {
2620 				rth->dst.input = ip_mr_input;
2621 				rth->dst.output = ip_mc_output;
2622 			}
2623 		}
2624 #endif
2625 	}
2626 
2627 	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2628 	lwtunnel_set_redirect(&rth->dst);
2629 
2630 	return rth;
2631 }
2632 
2633 /*
2634  * Major route resolver routine.
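 *
 * Resolves the output device and source address for @fl4, consulting
 * the FIB unless the destination forces a loopback or on-link route,
 * and finally calls __mkroute_output() to build the rtable.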
2635  */
2636 
2637 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2638 					const struct sk_buff *skb)
2639 {
2640 	struct fib_result res = {
2641 		.type		= RTN_UNSPEC,
2642 		.fi		= NULL,
2643 		.table		= NULL,
2644 		.tclassid	= 0,
2645 	};
2646 	struct rtable *rth;
2647 
2648 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
2649 	ip_rt_fix_tos(fl4);
2650 
2651 	rcu_read_lock();
2652 	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2653 	rcu_read_unlock();
2654 
2655 	return rth;
2656 }
2657 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2658 
2659 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2660 					    struct fib_result *res,
2661 					    const struct sk_buff *skb)
2662 {
2663 	struct net_device *dev_out = NULL;
2664 	int orig_oif = fl4->flowi4_oif;
2665 	unsigned int flags = 0;
2666 	struct rtable *rth;
2667 	int err;
2668 
2669 	if (fl4->saddr) {
2670 		if (ipv4_is_multicast(fl4->saddr) ||
2671 		    ipv4_is_lbcast(fl4->saddr) ||
2672 		    ipv4_is_zeronet(fl4->saddr)) {
2673 			rth = ERR_PTR(-EINVAL);
2674 			goto out;
2675 		}
2676 
2677 		rth = ERR_PTR(-ENETUNREACH);
2678 
2679 		/* I removed the check for oif == dev_out->oif here.
2680 		 * It was wrong for two reasons:
2681 		 * 1. ip_dev_find(net, saddr) can return the wrong iface, if
2682 		 *    saddr is assigned to multiple interfaces.
2683 		 * 2. Moreover, we are allowed to send packets with the saddr
2684 		 *    of another iface. --ANK
2685 		 */
2686 
2687 		if (fl4->flowi4_oif == 0 &&
2688 		    (ipv4_is_multicast(fl4->daddr) ||
2689 		     ipv4_is_lbcast(fl4->daddr))) {
2690 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2691 			dev_out = __ip_dev_find(net, fl4->saddr, false);
2692 			if (!dev_out)
2693 				goto out;
2694 
2695 			/* Special hack: the user can direct multicasts
2696 			 * and limited broadcast via the necessary interface
2697 			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2698 			 * This hack is not just for fun, it allows
2699 			 * vic, vat and friends to work.
2700 			 * They bind a socket to loopback, set ttl to zero
2701 			 * and expect that it will work.
2702 			 * From the viewpoint of the routing cache they are broken,
2703 			 * because we are not allowed to build a multicast path
2704 			 * with a loopback source addr (look, the routing cache
2705 			 * cannot know that ttl is zero, so that the packet
2706 			 * will not leave this host and the route is valid).
2707 			 * Luckily, this hack is a good workaround.
2708 			 */
2709 
2710 			fl4->flowi4_oif = dev_out->ifindex;
2711 			goto make_route;
2712 		}
2713 
2714 		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2715 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2716 			if (!__ip_dev_find(net, fl4->saddr, false))
2717 				goto out;
2718 		}
2719 	}
2720 
2721 
2722 	if (fl4->flowi4_oif) {
2723 		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2724 		rth = ERR_PTR(-ENODEV);
2725 		if (!dev_out)
2726 			goto out;
2727 
2728 		/* RACE: Check return value of inet_select_addr instead. */
2729 		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2730 			rth = ERR_PTR(-ENETUNREACH);
2731 			goto out;
2732 		}
2733 		if (ipv4_is_local_multicast(fl4->daddr) ||
2734 		    ipv4_is_lbcast(fl4->daddr) ||
2735 		    fl4->flowi4_proto == IPPROTO_IGMP) {
2736 			if (!fl4->saddr)
2737 				fl4->saddr = inet_select_addr(dev_out, 0,
2738 							      RT_SCOPE_LINK);
2739 			goto make_route;
2740 		}
2741 		if (!fl4->saddr) {
2742 			if (ipv4_is_multicast(fl4->daddr))
2743 				fl4->saddr = inet_select_addr(dev_out, 0,
2744 							      fl4->flowi4_scope);
2745 			else if (!fl4->daddr)
2746 				fl4->saddr = inet_select_addr(dev_out, 0,
2747 							      RT_SCOPE_HOST);
2748 		}
2749 	}
2750 
2751 	if (!fl4->daddr) {
2752 		fl4->daddr = fl4->saddr;
2753 		if (!fl4->daddr)
2754 			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2755 		dev_out = net->loopback_dev;
2756 		fl4->flowi4_oif = LOOPBACK_IFINDEX;
2757 		res->type = RTN_LOCAL;
2758 		flags |= RTCF_LOCAL;
2759 		goto make_route;
2760 	}
2761 
2762 	err = fib_lookup(net, fl4, res, 0);
2763 	if (err) {
2764 		res->fi = NULL;
2765 		res->table = NULL;
2766 		if (fl4->flowi4_oif &&
2767 		    (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
2768 			/* Apparently, the routing tables are wrong. Assume
2769 			 * that the destination is on-link.
2770 			 *
2771 			 * WHY? DW.
2772 			 * Because we are allowed to send to an iface
2773 			 * even if it has NO routes and NO assigned
2774 			 * addresses. When oif is specified, routing
2775 			 * tables are looked up with only one purpose:
2776 			 * to catch if the destination is gatewayed, rather than
2777 			 * direct. Moreover, if MSG_DONTROUTE is set,
2778 			 * we send the packet, ignoring both routing tables
2779 			 * and ifaddr state. --ANK
2780 			 *
2781 			 *
2782 			 * We could make it even if oif is unknown,
2783 			 * likely IPv6, but we do not.
2784 			 */
2785 
2786 			if (fl4->saddr == 0)
2787 				fl4->saddr = inet_select_addr(dev_out, 0,
2788 							      RT_SCOPE_LINK);
2789 			res->type = RTN_UNICAST;
2790 			goto make_route;
2791 		}
2792 		rth = ERR_PTR(err);
2793 		goto out;
2794 	}
2795 
2796 	if (res->type == RTN_LOCAL) {
2797 		if (!fl4->saddr) {
2798 			if (res->fi->fib_prefsrc)
2799 				fl4->saddr = res->fi->fib_prefsrc;
2800 			else
2801 				fl4->saddr = fl4->daddr;
2802 		}
2803 
2804 		/* L3 master device is the loopback for that domain */
2805 		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2806 			net->loopback_dev;
2807 
2808 		/* make sure orig_oif points to fib result device even
2809 		 * though packet rx/tx happens over loopback or l3mdev
2810 		 */
2811 		orig_oif = FIB_RES_OIF(*res);
2812 
2813 		fl4->flowi4_oif = dev_out->ifindex;
2814 		flags |= RTCF_LOCAL;
2815 		goto make_route;
2816 	}
2817 
2818 	fib_select_path(net, res, fl4, skb);
2819 
2820 	dev_out = FIB_RES_DEV(*res);
2821 
2822 make_route:
2823 	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2824 
2825 out:
2826 	return rth;
2827 }
2828 
2829 static struct dst_ops ipv4_dst_blackhole_ops = {
2830 	.family			= AF_INET,
2831 	.default_advmss		= ipv4_default_advmss,
2832 	.neigh_lookup		= ipv4_neigh_lookup,
2833 	.check			= dst_blackhole_check,
2834 	.cow_metrics		= dst_blackhole_cow_metrics,
2835 	.update_pmtu		= dst_blackhole_update_pmtu,
2836 	.redirect		= dst_blackhole_redirect,
2837 	.mtu			= dst_blackhole_mtu,
2838 };
2839 
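/* Clone an rtable into a "blackhole" copy whose input/output hooks are
 * dst_discard/dst_discard_out: the routing metadata survives, but any
 * packet that uses the dst is silently dropped.
 */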
2840 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2841 {
2842 	struct rtable *ort = (struct rtable *) dst_orig;
2843 	struct rtable *rt;
2844 
2845 	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2846 	if (rt) {
2847 		struct dst_entry *new = &rt->dst;
2848 
2849 		new->__use = 1;
2850 		new->input = dst_discard;
2851 		new->output = dst_discard_out;
2852 
2853 		new->dev = net->loopback_dev;
2854 		dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC);
2855 
2856 		rt->rt_is_input = ort->rt_is_input;
2857 		rt->rt_iif = ort->rt_iif;
2858 		rt->rt_pmtu = ort->rt_pmtu;
2859 		rt->rt_mtu_locked = ort->rt_mtu_locked;
2860 
2861 		rt->rt_genid = rt_genid_ipv4(net);
2862 		rt->rt_flags = ort->rt_flags;
2863 		rt->rt_type = ort->rt_type;
2864 		rt->rt_uses_gateway = ort->rt_uses_gateway;
2865 		rt->rt_gw_family = ort->rt_gw_family;
2866 		if (rt->rt_gw_family == AF_INET)
2867 			rt->rt_gw4 = ort->rt_gw4;
2868 		else if (rt->rt_gw_family == AF_INET6)
2869 			rt->rt_gw6 = ort->rt_gw6;
2870 
2871 		INIT_LIST_HEAD(&rt->rt_uncached);
2872 	}
2873 
2874 	dst_release(dst_orig);
2875 
2876 	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2877 }
2878 
2879 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2880 				    const struct sock *sk)
2881 {
2882 	struct rtable *rt = __ip_route_output_key(net, flp4);
2883 
2884 	if (IS_ERR(rt))
2885 		return rt;
2886 
2887 	if (flp4->flowi4_proto) {
2888 		flp4->flowi4_oif = rt->dst.dev->ifindex;
2889 		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2890 							flowi4_to_flowi(flp4),
2891 							sk, 0);
2892 	}
2893 
2894 	return rt;
2895 }
2896 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2897 
2898 struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
2899 				      struct net_device *dev,
2900 				      struct net *net, __be32 *saddr,
2901 				      const struct ip_tunnel_info *info,
2902 				      u8 protocol, bool use_cache)
2903 {
2904 #ifdef CONFIG_DST_CACHE
2905 	struct dst_cache *dst_cache;
2906 #endif
2907 	struct rtable *rt = NULL;
2908 	struct flowi4 fl4;
2909 	__u8 tos;
2910 
2911 #ifdef CONFIG_DST_CACHE
2912 	dst_cache = (struct dst_cache *)&info->dst_cache;
2913 	if (use_cache) {
2914 		rt = dst_cache_get_ip4(dst_cache, saddr);
2915 		if (rt)
2916 			return rt;
2917 	}
2918 #endif
2919 	memset(&fl4, 0, sizeof(fl4));
2920 	fl4.flowi4_mark = skb->mark;
2921 	fl4.flowi4_proto = protocol;
2922 	fl4.daddr = info->key.u.ipv4.dst;
2923 	fl4.saddr = info->key.u.ipv4.src;
2924 	tos = info->key.tos;
2925 	fl4.flowi4_tos = RT_TOS(tos);
2926 
2927 	rt = ip_route_output_key(net, &fl4);
2928 	if (IS_ERR(rt)) {
2929 		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
2930 		return ERR_PTR(-ENETUNREACH);
2931 	}
2932 	if (rt->dst.dev == dev) { /* is this necessary? */
2933 		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
2934 		ip_rt_put(rt);
2935 		return ERR_PTR(-ELOOP);
2936 	}
2937 #ifdef CONFIG_DST_CACHE
2938 	if (use_cache)
2939 		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
2940 #endif
2941 	*saddr = fl4.saddr;
2942 	return rt;
2943 }
2944 EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
2945 
2946 /* called with rcu_read_lock held */
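/* Fill one RTM_NEWROUTE netlink message describing @rt for a route
 * get/dump reply; returns -EMSGSIZE when the reply skb runs out of
 * room.
 */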
2947 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2948 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2949 			struct sk_buff *skb, u32 portid, u32 seq,
2950 			unsigned int flags)
2951 {
2952 	struct rtmsg *r;
2953 	struct nlmsghdr *nlh;
2954 	unsigned long expires = 0;
2955 	u32 error;
2956 	u32 metrics[RTAX_MAX];
2957 
2958 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2959 	if (!nlh)
2960 		return -EMSGSIZE;
2961 
2962 	r = nlmsg_data(nlh);
2963 	r->rtm_family	 = AF_INET;
2964 	r->rtm_dst_len	= 32;
2965 	r->rtm_src_len	= 0;
2966 	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
2967 	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
2968 	if (nla_put_u32(skb, RTA_TABLE, table_id))
2969 		goto nla_put_failure;
2970 	r->rtm_type	= rt->rt_type;
2971 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2972 	r->rtm_protocol = RTPROT_UNSPEC;
2973 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2974 	if (rt->rt_flags & RTCF_NOTIFY)
2975 		r->rtm_flags |= RTM_F_NOTIFY;
2976 	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2977 		r->rtm_flags |= RTCF_DOREDIRECT;
2978 
2979 	if (nla_put_in_addr(skb, RTA_DST, dst))
2980 		goto nla_put_failure;
2981 	if (src) {
2982 		r->rtm_src_len = 32;
2983 		if (nla_put_in_addr(skb, RTA_SRC, src))
2984 			goto nla_put_failure;
2985 	}
2986 	if (rt->dst.dev &&
2987 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2988 		goto nla_put_failure;
2989 	if (rt->dst.lwtstate &&
2990 	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2991 		goto nla_put_failure;
2992 #ifdef CONFIG_IP_ROUTE_CLASSID
2993 	if (rt->dst.tclassid &&
2994 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2995 		goto nla_put_failure;
2996 #endif
2997 	if (fl4 && !rt_is_input_route(rt) &&
2998 	    fl4->saddr != src) {
2999 		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
3000 			goto nla_put_failure;
3001 	}
3002 	if (rt->rt_uses_gateway) {
3003 		if (rt->rt_gw_family == AF_INET &&
3004 		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
3005 			goto nla_put_failure;
3006 		} else if (rt->rt_gw_family == AF_INET6) {
3007 			int alen = sizeof(struct in6_addr);
3008 			struct nlattr *nla;
3009 			struct rtvia *via;
3010 
3011 			nla = nla_reserve(skb, RTA_VIA, alen + 2);
3012 			if (!nla)
3013 				goto nla_put_failure;
3014 
3015 			via = nla_data(nla);
3016 			via->rtvia_family = AF_INET6;
3017 			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
3018 		}
3019 	}
3020 
3021 	expires = rt->dst.expires;
3022 	if (expires) {
3023 		unsigned long now = jiffies;
3024 
3025 		if (time_before(now, expires))
3026 			expires -= now;
3027 		else
3028 			expires = 0;
3029 	}
3030 
3031 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3032 	if (rt->rt_pmtu && expires)
3033 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
3034 	if (rt->rt_mtu_locked && expires)
3035 		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
3036 	if (rtnetlink_put_metrics(skb, metrics) < 0)
3037 		goto nla_put_failure;
3038 
3039 	if (fl4) {
3040 		if (fl4->flowi4_mark &&
3041 		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
3042 			goto nla_put_failure;
3043 
3044 		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
3045 		    nla_put_u32(skb, RTA_UID,
3046 				from_kuid_munged(current_user_ns(),
3047 						 fl4->flowi4_uid)))
3048 			goto nla_put_failure;
3049 
3050 		if (rt_is_input_route(rt)) {
3051 #ifdef CONFIG_IP_MROUTE
3052 			if (ipv4_is_multicast(dst) &&
3053 			    !ipv4_is_local_multicast(dst) &&
3054 			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
3055 				int err = ipmr_get_route(net, skb,
3056 							 fl4->saddr, fl4->daddr,
3057 							 r, portid);
3058 
3059 				if (err <= 0) {
3060 					if (err == 0)
3061 						return 0;
3062 					goto nla_put_failure;
3063 				}
3064 			} else
3065 #endif
3066 				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3067 					goto nla_put_failure;
3068 		}
3069 	}
3070 
3071 	error = rt->dst.error;
3072 
3073 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3074 		goto nla_put_failure;
3075 
3076 	nlmsg_end(skb, nlh);
3077 	return 0;
3078 
3079 nla_put_failure:
3080 	nlmsg_cancel(skb, nlh);
3081 	return -EMSGSIZE;
3082 }
3083 
3084 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3085 			    struct netlink_callback *cb, u32 table_id,
3086 			    struct fnhe_hash_bucket *bucket, int genid,
3087 			    int *fa_index, int fa_start, unsigned int flags)
3088 {
3089 	int i;
3090 
3091 	for (i = 0; i < FNHE_HASH_SIZE; i++) {
3092 		struct fib_nh_exception *fnhe;
3093 
3094 		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3095 		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
3096 			struct rtable *rt;
3097 			int err;
3098 
3099 			if (*fa_index < fa_start)
3100 				goto next;
3101 
3102 			if (fnhe->fnhe_genid != genid)
3103 				goto next;
3104 
3105 			if (fnhe->fnhe_expires &&
3106 			    time_after(jiffies, fnhe->fnhe_expires))
3107 				goto next;
3108 
3109 			rt = rcu_dereference(fnhe->fnhe_rth_input);
3110 			if (!rt)
3111 				rt = rcu_dereference(fnhe->fnhe_rth_output);
3112 			if (!rt)
3113 				goto next;
3114 
3115 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3116 					   table_id, NULL, skb,
3117 					   NETLINK_CB(cb->skb).portid,
3118 					   cb->nlh->nlmsg_seq, flags);
3119 			if (err)
3120 				return err;
3121 next:
3122 			(*fa_index)++;
3123 		}
3124 	}
3125 
3126 	return 0;
3127 }
3128 
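/* Walk every live nexthop of @fi and dump its exception (fnhe) routes
 * into the netlink dump, skipping dead nexthops, stale genids and
 * expired entries.
 */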
3129 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3130 		       u32 table_id, struct fib_info *fi,
3131 		       int *fa_index, int fa_start, unsigned int flags)
3132 {
3133 	struct net *net = sock_net(cb->skb->sk);
3134 	int nhsel, genid = fnhe_genid(net);
3135 
3136 	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3137 		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3138 		struct fnhe_hash_bucket *bucket;
3139 		int err;
3140 
3141 		if (nhc->nhc_flags & RTNH_F_DEAD)
3142 			continue;
3143 
3144 		rcu_read_lock();
3145 		bucket = rcu_dereference(nhc->nhc_exceptions);
3146 		err = 0;
3147 		if (bucket)
3148 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3149 					       genid, fa_index, fa_start,
3150 					       flags);
3151 		rcu_read_unlock();
3152 		if (err)
3153 			return err;
3154 	}
3155 
3156 	return 0;
3157 }
3158 
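/* Build a minimal dummy packet (IP header plus an optional UDP, TCP or
 * ICMP header) so a RTM_GETROUTE request can be run through the real
 * input/output routing paths.
 */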
3159 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3160 						   u8 ip_proto, __be16 sport,
3161 						   __be16 dport)
3162 {
3163 	struct sk_buff *skb;
3164 	struct iphdr *iph;
3165 
3166 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3167 	if (!skb)
3168 		return NULL;
3169 
3170 	/* Reserve room for dummy headers; this skb can pass
3171 	 * through a good chunk of the routing engine.
3172 	 */
3173 	skb_reset_mac_header(skb);
3174 	skb_reset_network_header(skb);
3175 	skb->protocol = htons(ETH_P_IP);
3176 	iph = skb_put(skb, sizeof(struct iphdr));
3177 	iph->protocol = ip_proto;
3178 	iph->saddr = src;
3179 	iph->daddr = dst;
3180 	iph->version = 0x4;
3181 	iph->frag_off = 0;
3182 	iph->ihl = 0x5;
3183 	skb_set_transport_header(skb, skb->len);
3184 
3185 	switch (iph->protocol) {
3186 	case IPPROTO_UDP: {
3187 		struct udphdr *udph;
3188 
3189 		udph = skb_put_zero(skb, sizeof(struct udphdr));
3190 		udph->source = sport;
3191 		udph->dest = dport;
3192 		udph->len = htons(sizeof(struct udphdr));
3193 		udph->check = 0;
3194 		break;
3195 	}
3196 	case IPPROTO_TCP: {
3197 		struct tcphdr *tcph;
3198 
3199 		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3200 		tcph->source	= sport;
3201 		tcph->dest	= dport;
3202 		tcph->doff	= sizeof(struct tcphdr) / 4;
3203 		tcph->rst = 1;
3204 		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3205 					    src, dst, 0);
3206 		break;
3207 	}
3208 	case IPPROTO_ICMP: {
3209 		struct icmphdr *icmph;
3210 
3211 		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3212 		icmph->type = ICMP_ECHO;
3213 		icmph->code = 0;
3214 	}
3215 	}
3216 
3217 	return skb;
3218 }
3219 
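/* Strict validation of a RTM_GETROUTE request: header fields that are
 * meaningless for an IPv4 route get are rejected, and only a small set
 * of attributes is accepted under strict checking.
 */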
3220 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3221 				       const struct nlmsghdr *nlh,
3222 				       struct nlattr **tb,
3223 				       struct netlink_ext_ack *extack)
3224 {
3225 	struct rtmsg *rtm;
3226 	int i, err;
3227 
3228 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3229 		NL_SET_ERR_MSG(extack,
3230 			       "ipv4: Invalid header for route get request");
3231 		return -EINVAL;
3232 	}
3233 
3234 	if (!netlink_strict_get_check(skb))
3235 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3236 					      rtm_ipv4_policy, extack);
3237 
3238 	rtm = nlmsg_data(nlh);
3239 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3240 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3241 	    rtm->rtm_table || rtm->rtm_protocol ||
3242 	    rtm->rtm_scope || rtm->rtm_type) {
3243 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3244 		return -EINVAL;
3245 	}
3246 
3247 	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3248 			       RTM_F_LOOKUP_TABLE |
3249 			       RTM_F_FIB_MATCH)) {
3250 		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3251 		return -EINVAL;
3252 	}
3253 
3254 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3255 					    rtm_ipv4_policy, extack);
3256 	if (err)
3257 		return err;
3258 
3259 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3260 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3261 		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3262 		return -EINVAL;
3263 	}
3264 
3265 	for (i = 0; i <= RTA_MAX; i++) {
3266 		if (!tb[i])
3267 			continue;
3268 
3269 		switch (i) {
3270 		case RTA_IIF:
3271 		case RTA_OIF:
3272 		case RTA_SRC:
3273 		case RTA_DST:
3274 		case RTA_IP_PROTO:
3275 		case RTA_SPORT:
3276 		case RTA_DPORT:
3277 		case RTA_MARK:
3278 		case RTA_UID:
3279 			break;
3280 		default:
3281 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3282 			return -EINVAL;
3283 		}
3284 	}
3285 
3286 	return 0;
3287 }
3288 
3289 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3290 			     struct netlink_ext_ack *extack)
3291 {
3292 	struct net *net = sock_net(in_skb->sk);
3293 	struct nlattr *tb[RTA_MAX+1];
3294 	u32 table_id = RT_TABLE_MAIN;
3295 	__be16 sport = 0, dport = 0;
3296 	struct fib_result res = {};
3297 	u8 ip_proto = IPPROTO_UDP;
3298 	struct rtable *rt = NULL;
3299 	struct sk_buff *skb;
3300 	struct rtmsg *rtm;
3301 	struct flowi4 fl4 = {};
3302 	__be32 dst = 0;
3303 	__be32 src = 0;
3304 	kuid_t uid;
3305 	u32 iif;
3306 	int err;
3307 	int mark;
3308 
3309 	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3310 	if (err < 0)
3311 		return err;
3312 
3313 	rtm = nlmsg_data(nlh);
3314 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3315 	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3316 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3317 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3318 	if (tb[RTA_UID])
3319 		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3320 	else
3321 		uid = (iif ? INVALID_UID : current_uid());
3322 
3323 	if (tb[RTA_IP_PROTO]) {
3324 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3325 						  &ip_proto, AF_INET, extack);
3326 		if (err)
3327 			return err;
3328 	}
3329 
3330 	if (tb[RTA_SPORT])
3331 		sport = nla_get_be16(tb[RTA_SPORT]);
3332 
3333 	if (tb[RTA_DPORT])
3334 		dport = nla_get_be16(tb[RTA_DPORT]);
3335 
3336 	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3337 	if (!skb)
3338 		return -ENOBUFS;
3339 
3340 	fl4.daddr = dst;
3341 	fl4.saddr = src;
3342 	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3343 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3344 	fl4.flowi4_mark = mark;
3345 	fl4.flowi4_uid = uid;
3346 	if (sport)
3347 		fl4.fl4_sport = sport;
3348 	if (dport)
3349 		fl4.fl4_dport = dport;
3350 	fl4.flowi4_proto = ip_proto;
3351 
3352 	rcu_read_lock();
3353 
3354 	if (iif) {
3355 		struct net_device *dev;
3356 
3357 		dev = dev_get_by_index_rcu(net, iif);
3358 		if (!dev) {
3359 			err = -ENODEV;
3360 			goto errout_rcu;
3361 		}
3362 
3363 		fl4.flowi4_iif = iif; /* for rt_fill_info */
3364 		skb->dev	= dev;
3365 		skb->mark	= mark;
3366 		err = ip_route_input_rcu(skb, dst, src,
3367 					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3368 					 &res);
3369 
3370 		rt = skb_rtable(skb);
3371 		if (err == 0 && rt->dst.error)
3372 			err = -rt->dst.error;
3373 	} else {
3374 		fl4.flowi4_iif = LOOPBACK_IFINDEX;
3375 		skb->dev = net->loopback_dev;
3376 		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3377 		err = 0;
3378 		if (IS_ERR(rt))
3379 			err = PTR_ERR(rt);
3380 		else
3381 			skb_dst_set(skb, &rt->dst);
3382 	}
3383 
3384 	if (err)
3385 		goto errout_rcu;
3386 
3387 	if (rtm->rtm_flags & RTM_F_NOTIFY)
3388 		rt->rt_flags |= RTCF_NOTIFY;
3389 
3390 	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3391 		table_id = res.table ? res.table->tb_id : 0;
3392 
3393 	/* reset skb for netlink reply msg */
3394 	skb_trim(skb, 0);
3395 	skb_reset_network_header(skb);
3396 	skb_reset_transport_header(skb);
3397 	skb_reset_mac_header(skb);
3398 
3399 	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3400 		struct fib_rt_info fri;
3401 
3402 		if (!res.fi) {
3403 			err = fib_props[res.type].error;
3404 			if (!err)
3405 				err = -EHOSTUNREACH;
3406 			goto errout_rcu;
3407 		}
3408 		fri.fi = res.fi;
3409 		fri.tb_id = table_id;
3410 		fri.dst = res.prefix;
3411 		fri.dst_len = res.prefixlen;
3412 		fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
3413 		fri.type = rt->rt_type;
3414 		fri.offload = 0;
3415 		fri.trap = 0;
3416 		fri.offload_failed = 0;
3417 		if (res.fa_head) {
3418 			struct fib_alias *fa;
3419 
3420 			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3421 				u8 slen = 32 - fri.dst_len;
3422 
3423 				if (fa->fa_slen == slen &&
3424 				    fa->tb_id == fri.tb_id &&
3425 				    fa->fa_dscp == fri.dscp &&
3426 				    fa->fa_info == res.fi &&
3427 				    fa->fa_type == fri.type) {
3428 					fri.offload = READ_ONCE(fa->offload);
3429 					fri.trap = READ_ONCE(fa->trap);
3430 					break;
3431 				}
3432 			}
3433 		}
3434 		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3435 				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3436 	} else {
3437 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3438 				   NETLINK_CB(in_skb).portid,
3439 				   nlh->nlmsg_seq, 0);
3440 	}
3441 	if (err < 0)
3442 		goto errout_rcu;
3443 
3444 	rcu_read_unlock();
3445 
3446 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3447 
3448 errout_free:
3449 	return err;
3450 errout_rcu:
3451 	rcu_read_unlock();
3452 	kfree_skb(skb);
3453 	goto errout_free;
3454 }
3455 
3456 void ip_rt_multicast_event(struct in_device *in_dev)
3457 {
3458 	rt_cache_flush(dev_net(in_dev->dev));
3459 }
3460 
3461 #ifdef CONFIG_SYSCTL
3462 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3463 static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
3464 static int ip_rt_gc_elasticity __read_mostly	= 8;
3465 static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
3466 
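/* Writing any value to /proc/sys/net/ipv4/route/flush invalidates the
 * cached routes and nexthop exceptions of this netns; reading the file
 * fails with -EINVAL.
 */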
3467 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3468 		void *buffer, size_t *lenp, loff_t *ppos)
3469 {
3470 	struct net *net = (struct net *)__ctl->extra1;
3471 
3472 	if (write) {
3473 		rt_cache_flush(net);
3474 		fnhe_genid_bump(net);
3475 		return 0;
3476 	}
3477 
3478 	return -EINVAL;
3479 }
3480 
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static const char ipv4_route_flush_procname[] = "flush";

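/* Per-netns tunables; sysctl_route_net_init() rebases the .data pointers
 * from init_net onto each new namespace.
 */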
static struct ctl_table ipv4_route_netns_table[] = {
	{
		.procname	= ipv4_route_flush_procname,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{
		.procname	= "min_pmtu",
		.data		= &init_net.ipv4.ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv4.ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv4.ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

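/* Register the per-netns route sysctls.  init_net uses the template
 * directly; other namespaces get a kmemdup()'d copy with .data rebased.
 */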
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_netns_table;
	if (!net_eq(net, &init_net)) {
		int i;

		tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export non-whitelisted sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns) {
			if (tbl[0].procname != ipv4_route_flush_procname)
				tbl[0].procname = NULL;
		}

		/* Update the variables to point into the current struct net,
		 * except for the first element (flush).
		 */
		for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
			tbl[i].data += (void *)net - (void *)&init_net;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_netns_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

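/* init_net is never dismantled, so the table being freed here is always a
 * kmemdup()'d copy; the BUG_ON() below guards that assumption.
 */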
static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_netns_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

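/* Defaults for the namespaceified route sysctls; applied to every new
 * namespace whether or not CONFIG_SYSCTL exposes them.
 */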
static __net_init int netns_ip_rt_init(struct net *net)
{
	/* Set default values for the namespaceified sysctls */
	net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
	net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
	net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
	return 0;
}

static struct pernet_operations __net_initdata ip_rt_ops = {
	.init = netns_ip_rt_init,
};

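/* Fresh generation counters per namespace: bumping rt_genid invalidates
 * all cached dsts, fnhe_genid invalidates cached next-hop exceptions, and
 * dev_addr_genid starts from a random value.
 */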
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

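/* Each namespace keeps its own inetpeer tree for long-lived per-destination
 * state (e.g. ICMP rate limiting).
 */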
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

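/* Called from inet_init().  Sets up the IP ident hash, per-cpu state, dst
 * caches and the pernet subsystems the routing code depends on.
 */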
int __init ip_rt_init(void)
{
	void *idents_hash;
	int cpu;

	/* For modern hosts, this will use 2 MB of memory */
	idents_hash = alloc_large_system_hash("IP idents",
					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
					      0,
					      16, /* one bucket per 64 KB */
					      HASH_ZERO,
					      NULL,
					      &ip_idents_mask,
					      2048,
					      256*1024);

	ip_idents = idents_hash;

	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));

	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);

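	/* Per-cpu lists of uncached rtables, so they can be unlinked when
	 * their netdevice goes away.
	 */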
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		INIT_LIST_HEAD(&ul->quarantine);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

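	/* The old routing cache is gone: make the GC thresholds unreachable
	 * so dst garbage collection effectively never triggers.
	 */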
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
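	/* RTM_GETROUTE can run without taking RTNL: the lookup only needs
	 * the RCU read lock.
	 */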
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&ip_rt_ops);
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif