1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		The User Datagram Protocol (UDP).
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
12  *		Hirokazu Takahashi, <taka@valinux.co.jp>
13  *
14  * Fixes:
15  *		Alan Cox	:	verify_area() calls
16  *		Alan Cox	: 	stopped close while in use off icmp
17  *					messages. Not a fix but a botch that
18  *					for udp at least is 'valid'.
19  *		Alan Cox	:	Fixed icmp handling properly
20  *		Alan Cox	: 	Correct error for oversized datagrams
21  *		Alan Cox	:	Tidied select() semantics.
22  *		Alan Cox	:	udp_err() fixed properly, also now
23  *					select and read wake correctly on errors
24  *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
25  *		Alan Cox	:	UDP can count its memory
26  *		Alan Cox	:	send to an unknown connection causes
27  *					an ECONNREFUSED off the icmp, but
28  *					does NOT close.
29  *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
30  *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
31  *					bug no longer crashes it.
32  *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
33  *		Alan Cox	:	Uses skb_free_datagram
34  *		Alan Cox	:	Added get/set sockopt support.
35  *		Alan Cox	:	Broadcasting without option set returns EACCES.
36  *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
37  *		Alan Cox	:	Use ip_tos and ip_ttl
38  *		Alan Cox	:	SNMP Mibs
39  *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
40  *		Matt Dillon	:	UDP length checks.
41  *		Alan Cox	:	Smarter af_inet used properly.
42  *		Alan Cox	:	Use new kernel side addressing.
43  *		Alan Cox	:	Incorrect return on truncated datagram receive.
44  *	Arnt Gulbrandsen 	:	New udp_send and stuff
45  *		Alan Cox	:	Cache last socket
46  *		Alan Cox	:	Route cache
47  *		Jon Peatfield	:	Minor efficiency fix to sendto().
48  *		Mike Shaver	:	RFC1122 checks.
49  *		Alan Cox	:	Nonblocking error fix.
50  *	Willy Konynenberg	:	Transparent proxying support.
51  *		Mike McLagan	:	Routing by source
52  *		David S. Miller	:	New socket lookup architecture.
53  *					Last socket cache retained as it
54  *					does have a high hit rate.
55  *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
56  *		Andi Kleen	:	Some cleanups, cache destination entry
57  *					for connect.
58  *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
59  *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
60  *					return ENOTCONN for unconnected sockets (POSIX)
61  *		Janos Farkas	:	don't deliver multi/broadcasts to a different
62  *					bound-to-device socket
63  *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
64  *					datagrams.
65  *	Hirokazu Takahashi	:	sendfile() on UDP works now.
66  *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
67  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
68  *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
69  *					a single port at the same time.
70  *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
71  *	James Chapman		:	Add L2TP encapsulation type.
72  *
73  *
74  *		This program is free software; you can redistribute it and/or
75  *		modify it under the terms of the GNU General Public License
76  *		as published by the Free Software Foundation; either version
77  *		2 of the License, or (at your option) any later version.
78  */
79 
80 #include <asm/system.h>
81 #include <asm/uaccess.h>
82 #include <asm/ioctls.h>
83 #include <linux/bootmem.h>
84 #include <linux/highmem.h>
85 #include <linux/swap.h>
86 #include <linux/types.h>
87 #include <linux/fcntl.h>
88 #include <linux/module.h>
89 #include <linux/socket.h>
90 #include <linux/sockios.h>
91 #include <linux/igmp.h>
92 #include <linux/in.h>
93 #include <linux/errno.h>
94 #include <linux/timer.h>
95 #include <linux/mm.h>
96 #include <linux/inet.h>
97 #include <linux/netdevice.h>
98 #include <linux/slab.h>
99 #include <net/tcp_states.h>
100 #include <linux/skbuff.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <net/net_namespace.h>
104 #include <net/icmp.h>
105 #include <net/route.h>
106 #include <net/checksum.h>
107 #include <net/xfrm.h>
108 #include "udp_impl.h"
109 
110 struct udp_table udp_table __read_mostly;
111 EXPORT_SYMBOL(udp_table);
112 
113 long sysctl_udp_mem[3] __read_mostly;
114 EXPORT_SYMBOL(sysctl_udp_mem);
115 
116 int sysctl_udp_rmem_min __read_mostly;
117 EXPORT_SYMBOL(sysctl_udp_rmem_min);
118 
119 int sysctl_udp_wmem_min __read_mostly;
120 EXPORT_SYMBOL(sysctl_udp_wmem_min);
121 
122 atomic_long_t udp_memory_allocated;
123 EXPORT_SYMBOL(udp_memory_allocated);
124 
125 #define MAX_UDP_PORTS 65536
126 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
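/*
 * Worked sizing example (a sketch): UDP_HTABLE_SIZE_MIN is 256 in this
 * tree (128 with CONFIG_BASE_SMALL), so with the smallest table each
 * primary hash chain covers at most 65536 / 256 = 256 distinct ports.
 * The on-stack bitmap used by udp_lib_lport_inuse() below therefore
 * needs only PORTS_PER_CHAIN = 256 bits (32 bytes) to mark every port
 * that can collide within one slot.
 */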
127 
128 static int udp_lib_lport_inuse(struct net *net, __u16 num,
129 			       const struct udp_hslot *hslot,
130 			       unsigned long *bitmap,
131 			       struct sock *sk,
132 			       int (*saddr_comp)(const struct sock *sk1,
133 						 const struct sock *sk2),
134 			       unsigned int log)
135 {
136 	struct sock *sk2;
137 	struct hlist_nulls_node *node;
138 
139 	sk_nulls_for_each(sk2, node, &hslot->head)
140 		if (net_eq(sock_net(sk2), net) &&
141 		    sk2 != sk &&
142 		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
143 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
144 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
145 		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
146 		    (*saddr_comp)(sk, sk2)) {
147 			if (bitmap)
148 				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
149 					  bitmap);
150 			else
151 				return 1;
152 		}
153 	return 0;
154 }
155 
156 /*
157  * Note: we still hold spinlock of primary hash chain, so no other writer
158  * can insert/delete a socket with local_port == num
159  */
160 static int udp_lib_lport_inuse2(struct net *net, __u16 num,
161 			       struct udp_hslot *hslot2,
162 			       struct sock *sk,
163 			       int (*saddr_comp)(const struct sock *sk1,
164 						 const struct sock *sk2))
165 {
166 	struct sock *sk2;
167 	struct hlist_nulls_node *node;
168 	int res = 0;
169 
170 	spin_lock(&hslot2->lock);
171 	udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
172 		if (net_eq(sock_net(sk2), net) &&
173 		    sk2 != sk &&
174 		    (udp_sk(sk2)->udp_port_hash == num) &&
175 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
176 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
177 		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
178 		    (*saddr_comp)(sk, sk2)) {
179 			res = 1;
180 			break;
181 		}
182 	spin_unlock(&hslot2->lock);
183 	return res;
184 }
185 
186 /**
187  *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
188  *
189  *  @sk:          socket struct in question
190  *  @snum:        port number to look up
191  *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
192  *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
193  *                   with NULL address
194  */
195 int udp_lib_get_port(struct sock *sk, unsigned short snum,
196 		       int (*saddr_comp)(const struct sock *sk1,
197 					 const struct sock *sk2),
198 		     unsigned int hash2_nulladdr)
199 {
200 	struct udp_hslot *hslot, *hslot2;
201 	struct udp_table *udptable = sk->sk_prot->h.udp_table;
202 	int    error = 1;
203 	struct net *net = sock_net(sk);
204 
205 	if (!snum) {
206 		int low, high, remaining;
207 		unsigned rand;
208 		unsigned short first, last;
209 		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
210 
211 		inet_get_local_port_range(&low, &high);
212 		remaining = (high - low) + 1;
213 
214 		rand = net_random();
215 		first = (((u64)rand * remaining) >> 32) + low;
216 		/*
217 		 * force rand to be an odd multiple of the hash table size
218 		 */
219 		rand = (rand | 1) * (udptable->mask + 1);
220 		last = first + udptable->mask + 1;
221 		do {
222 			hslot = udp_hashslot(udptable, net, first);
223 			bitmap_zero(bitmap, PORTS_PER_CHAIN);
224 			spin_lock_bh(&hslot->lock);
225 			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
226 					    saddr_comp, udptable->log);
227 
228 			snum = first;
229 			/*
230 			 * Iterate on all possible values of snum for this hash.
231 			 * Using steps of an odd multiple of the table size
232 			 * gives us randomization and full range coverage.
233 			 */
234 			do {
235 				if (low <= snum && snum <= high &&
236 				    !test_bit(snum >> udptable->log, bitmap) &&
237 				    !inet_is_reserved_local_port(snum))
238 					goto found;
239 				snum += rand;
240 			} while (snum != first);
241 			spin_unlock_bh(&hslot->lock);
242 		} while (++first != last);
243 		goto fail;
244 	} else {
245 		hslot = udp_hashslot(udptable, net, snum);
246 		spin_lock_bh(&hslot->lock);
247 		if (hslot->count > 10) {
248 			int exist;
249 			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
250 
251 			slot2          &= udptable->mask;
252 			hash2_nulladdr &= udptable->mask;
253 
254 			hslot2 = udp_hashslot2(udptable, slot2);
255 			if (hslot->count < hslot2->count)
256 				goto scan_primary_hash;
257 
258 			exist = udp_lib_lport_inuse2(net, snum, hslot2,
259 						     sk, saddr_comp);
260 			if (!exist && (hash2_nulladdr != slot2)) {
261 				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
262 				exist = udp_lib_lport_inuse2(net, snum, hslot2,
263 							     sk, saddr_comp);
264 			}
265 			if (exist)
266 				goto fail_unlock;
267 			else
268 				goto found;
269 		}
270 scan_primary_hash:
271 		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
272 					saddr_comp, 0))
273 			goto fail_unlock;
274 	}
275 found:
276 	inet_sk(sk)->inet_num = snum;
277 	udp_sk(sk)->udp_port_hash = snum;
278 	udp_sk(sk)->udp_portaddr_hash ^= snum;
279 	if (sk_unhashed(sk)) {
280 		sk_nulls_add_node_rcu(sk, &hslot->head);
281 		hslot->count++;
282 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
283 
284 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
285 		spin_lock(&hslot2->lock);
286 		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
287 					 &hslot2->head);
288 		hslot2->count++;
289 		spin_unlock(&hslot2->lock);
290 	}
291 	error = 0;
292 fail_unlock:
293 	spin_unlock_bh(&hslot->lock);
294 fail:
295 	return error;
296 }
297 EXPORT_SYMBOL(udp_lib_get_port);
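/*
 * Usage sketch (userspace, illustrative only): binding with
 * sin_port == 0 reaches udp_lib_get_port() with snum == 0 and takes
 * the randomized ephemeral-port search above:
 *
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *		.sin_port = 0,			// let the kernel pick
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */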
298 
299 static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
300 {
301 	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
302 
303 	return 	(!ipv6_only_sock(sk2)  &&
304 		 (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
305 		   inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
306 }
307 
308 static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
309 				       unsigned int port)
310 {
311 	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
312 }
313 
314 int udp_v4_get_port(struct sock *sk, unsigned short snum)
315 {
316 	unsigned int hash2_nulladdr =
317 		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
318 	unsigned int hash2_partial =
319 		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
320 
321 	/* precompute partial secondary hash */
322 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
323 	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
324 }
325 
326 static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
327 			 unsigned short hnum,
328 			 __be16 sport, __be32 daddr, __be16 dport, int dif)
329 {
330 	int score = -1;
331 
332 	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
333 			!ipv6_only_sock(sk)) {
334 		struct inet_sock *inet = inet_sk(sk);
335 
336 		score = (sk->sk_family == PF_INET ? 1 : 0);
337 		if (inet->inet_rcv_saddr) {
338 			if (inet->inet_rcv_saddr != daddr)
339 				return -1;
340 			score += 2;
341 		}
342 		if (inet->inet_daddr) {
343 			if (inet->inet_daddr != saddr)
344 				return -1;
345 			score += 2;
346 		}
347 		if (inet->inet_dport) {
348 			if (inet->inet_dport != sport)
349 				return -1;
350 			score += 2;
351 		}
352 		if (sk->sk_bound_dev_if) {
353 			if (sk->sk_bound_dev_if != dif)
354 				return -1;
355 			score += 2;
356 		}
357 	}
358 	return score;
359 }
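/*
 * Scoring sketch for compute_score() above: a bare IPv4 wildcard
 * socket scores 1, and each matching bound attribute (local address,
 * remote address, remote port, bound device) adds 2.  A fully
 * connected, device-bound socket can thus score 1 + 2 + 2 + 2 + 2 = 9
 * and always wins over a less specific listener for the same packet.
 */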
360 
361 /*
362  * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
363  */
364 #define SCORE2_MAX (1 + 2 + 2 + 2)
365 static inline int compute_score2(struct sock *sk, struct net *net,
366 				 __be32 saddr, __be16 sport,
367 				 __be32 daddr, unsigned int hnum, int dif)
368 {
369 	int score = -1;
370 
371 	if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
372 		struct inet_sock *inet = inet_sk(sk);
373 
374 		if (inet->inet_rcv_saddr != daddr)
375 			return -1;
376 		if (inet->inet_num != hnum)
377 			return -1;
378 
379 		score = (sk->sk_family == PF_INET ? 1 : 0);
380 		if (inet->inet_daddr) {
381 			if (inet->inet_daddr != saddr)
382 				return -1;
383 			score += 2;
384 		}
385 		if (inet->inet_dport) {
386 			if (inet->inet_dport != sport)
387 				return -1;
388 			score += 2;
389 		}
390 		if (sk->sk_bound_dev_if) {
391 			if (sk->sk_bound_dev_if != dif)
392 				return -1;
393 			score += 2;
394 		}
395 	}
396 	return score;
397 }
398 
399 
400 /* called with rcu_read_lock() */
401 static struct sock *udp4_lib_lookup2(struct net *net,
402 		__be32 saddr, __be16 sport,
403 		__be32 daddr, unsigned int hnum, int dif,
404 		struct udp_hslot *hslot2, unsigned int slot2)
405 {
406 	struct sock *sk, *result;
407 	struct hlist_nulls_node *node;
408 	int score, badness;
409 
410 begin:
411 	result = NULL;
412 	badness = -1;
413 	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
414 		score = compute_score2(sk, net, saddr, sport,
415 				      daddr, hnum, dif);
416 		if (score > badness) {
417 			result = sk;
418 			badness = score;
419 			if (score == SCORE2_MAX)
420 				goto exact_match;
421 		}
422 	}
423 	/*
424 	 * if the nulls value we got at the end of this lookup is
425 	 * not the expected one, we must restart lookup.
426 	 * We probably met an item that was moved to another chain.
427 	 */
428 	if (get_nulls_value(node) != slot2)
429 		goto begin;
430 
431 	if (result) {
432 exact_match:
433 		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
434 			result = NULL;
435 		else if (unlikely(compute_score2(result, net, saddr, sport,
436 				  daddr, hnum, dif) < badness)) {
437 			sock_put(result);
438 			goto begin;
439 		}
440 	}
441 	return result;
442 }
443 
444 /* UDP is nearly always wildcards out the wazoo, so it makes no sense to try
445  * harder than this. -DaveM
446  */
447 static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
448 		__be16 sport, __be32 daddr, __be16 dport,
449 		int dif, struct udp_table *udptable)
450 {
451 	struct sock *sk, *result;
452 	struct hlist_nulls_node *node;
453 	unsigned short hnum = ntohs(dport);
454 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
455 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
456 	int score, badness;
457 
458 	rcu_read_lock();
459 	if (hslot->count > 10) {
460 		hash2 = udp4_portaddr_hash(net, daddr, hnum);
461 		slot2 = hash2 & udptable->mask;
462 		hslot2 = &udptable->hash2[slot2];
463 		if (hslot->count < hslot2->count)
464 			goto begin;
465 
466 		result = udp4_lib_lookup2(net, saddr, sport,
467 					  daddr, hnum, dif,
468 					  hslot2, slot2);
469 		if (!result) {
470 			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
471 			slot2 = hash2 & udptable->mask;
472 			hslot2 = &udptable->hash2[slot2];
473 			if (hslot->count < hslot2->count)
474 				goto begin;
475 
476 			result = udp4_lib_lookup2(net, saddr, sport,
477 						  htonl(INADDR_ANY), hnum, dif,
478 						  hslot2, slot2);
479 		}
480 		rcu_read_unlock();
481 		return result;
482 	}
483 begin:
484 	result = NULL;
485 	badness = -1;
486 	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
487 		score = compute_score(sk, net, saddr, hnum, sport,
488 				      daddr, dport, dif);
489 		if (score > badness) {
490 			result = sk;
491 			badness = score;
492 		}
493 	}
494 	/*
495 	 * if the nulls value we got at the end of this lookup is
496 	 * not the expected one, we must restart lookup.
497 	 * We probably met an item that was moved to another chain.
498 	 */
499 	if (get_nulls_value(node) != slot)
500 		goto begin;
501 
502 	if (result) {
503 		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
504 			result = NULL;
505 		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
506 				  daddr, dport, dif) < badness)) {
507 			sock_put(result);
508 			goto begin;
509 		}
510 	}
511 	rcu_read_unlock();
512 	return result;
513 }
514 
515 static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
516 						 __be16 sport, __be16 dport,
517 						 struct udp_table *udptable)
518 {
519 	struct sock *sk;
520 	const struct iphdr *iph = ip_hdr(skb);
521 
522 	if (unlikely(sk = skb_steal_sock(skb)))
523 		return sk;
524 	else
525 		return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
526 					 iph->daddr, dport, inet_iif(skb),
527 					 udptable);
528 }
529 
530 struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
531 			     __be32 daddr, __be16 dport, int dif)
532 {
533 	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
534 }
535 EXPORT_SYMBOL_GPL(udp4_lib_lookup);
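/*
 * Usage sketch (illustrative): udp4_lib_lookup() takes and releases
 * rcu_read_lock() internally and returns a referenced socket, so a
 * hypothetical caller (e.g. a tunnel module) would do:
 *
 *	struct sock *sk = udp4_lib_lookup(net, src_ip, src_port,
 *					  dst_ip, dst_port, ifindex);
 *	if (sk) {
 *		// ... use the socket ...
 *		sock_put(sk);		// drop the reference taken for us
 *	}
 */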
536 
537 static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
538 					     __be16 loc_port, __be32 loc_addr,
539 					     __be16 rmt_port, __be32 rmt_addr,
540 					     int dif)
541 {
542 	struct hlist_nulls_node *node;
543 	struct sock *s = sk;
544 	unsigned short hnum = ntohs(loc_port);
545 
546 	sk_nulls_for_each_from(s, node) {
547 		struct inet_sock *inet = inet_sk(s);
548 
549 		if (!net_eq(sock_net(s), net) ||
550 		    udp_sk(s)->udp_port_hash != hnum ||
551 		    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
552 		    (inet->inet_dport != rmt_port && inet->inet_dport) ||
553 		    (inet->inet_rcv_saddr &&
554 		     inet->inet_rcv_saddr != loc_addr) ||
555 		    ipv6_only_sock(s) ||
556 		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
557 			continue;
558 		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
559 			continue;
560 		goto found;
561 	}
562 	s = NULL;
563 found:
564 	return s;
565 }
566 
567 /*
568  * This routine is called by the ICMP module when it gets some
569  * sort of error condition.  If err < 0 then the socket should
570  * be closed and the error returned to the user.  If err > 0
571  * it's just the icmp type << 8 | icmp code.
572  * Header points to the IP header of the error packet. We move
573  * past it, so that header then points to the first 8 bytes of
574  * the offending UDP header. From those we recover the ports and
575  * find the appropriate socket.
576  */
577 
578 void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
579 {
580 	struct inet_sock *inet;
581 	struct iphdr *iph = (struct iphdr *)skb->data;
582 	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
583 	const int type = icmp_hdr(skb)->type;
584 	const int code = icmp_hdr(skb)->code;
585 	struct sock *sk;
586 	int harderr;
587 	int err;
588 	struct net *net = dev_net(skb->dev);
589 
590 	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
591 			iph->saddr, uh->source, skb->dev->ifindex, udptable);
592 	if (sk == NULL) {
593 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
594 		return;	/* No socket for error */
595 	}
596 
597 	err = 0;
598 	harderr = 0;
599 	inet = inet_sk(sk);
600 
601 	switch (type) {
602 	default:
603 	case ICMP_TIME_EXCEEDED:
604 		err = EHOSTUNREACH;
605 		break;
606 	case ICMP_SOURCE_QUENCH:
607 		goto out;
608 	case ICMP_PARAMETERPROB:
609 		err = EPROTO;
610 		harderr = 1;
611 		break;
612 	case ICMP_DEST_UNREACH:
613 		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
614 			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
615 				err = EMSGSIZE;
616 				harderr = 1;
617 				break;
618 			}
619 			goto out;
620 		}
621 		err = EHOSTUNREACH;
622 		if (code <= NR_ICMP_UNREACH) {
623 			harderr = icmp_err_convert[code].fatal;
624 			err = icmp_err_convert[code].errno;
625 		}
626 		break;
627 	}
628 
629 	/*
630 	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
631 	 *	4.1.3.3.
632 	 */
633 	if (!inet->recverr) {
634 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
635 			goto out;
636 	} else
637 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
638 
639 	sk->sk_err = err;
640 	sk->sk_error_report(sk);
641 out:
642 	sock_put(sk);
643 }
644 
645 void udp_err(struct sk_buff *skb, u32 info)
646 {
647 	__udp4_lib_err(skb, info, &udp_table);
648 }
649 
650 /*
651  * Throw away all pending data and cancel the corking. Socket is locked.
652  */
653 void udp_flush_pending_frames(struct sock *sk)
654 {
655 	struct udp_sock *up = udp_sk(sk);
656 
657 	if (up->pending) {
658 		up->len = 0;
659 		up->pending = 0;
660 		ip_flush_pending_frames(sk);
661 	}
662 }
663 EXPORT_SYMBOL(udp_flush_pending_frames);
664 
665 /**
666  * 	udp4_hwcsum  -  handle outgoing HW checksumming
667  * 	@skb: 	sk_buff containing the filled-in UDP header
668  * 	        (checksum field must be zeroed out)
669  *	@src:	source IP address
670  *	@dst:	destination IP address
671  */
672 static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
673 {
674 	struct udphdr *uh = udp_hdr(skb);
675 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
676 	int offset = skb_transport_offset(skb);
677 	int len = skb->len - offset;
678 	int hlen = len;
679 	__wsum csum = 0;
680 
681 	if (!frags) {
682 		/*
683 		 * Only one fragment on the socket.
684 		 */
685 		skb->csum_start = skb_transport_header(skb) - skb->head;
686 		skb->csum_offset = offsetof(struct udphdr, check);
687 		uh->check = ~csum_tcpudp_magic(src, dst, len,
688 					       IPPROTO_UDP, 0);
689 	} else {
690 		/*
691 		 * HW checksumming won't work here: there are two or
692 		 * more fragments on the socket, so the csums of all
693 		 * sk_buffs must be combined in software.
694 		 */
695 		do {
696 			csum = csum_add(csum, frags->csum);
697 			hlen -= frags->len;
698 		} while ((frags = frags->next));
699 
700 		csum = skb_checksum(skb, offset, hlen, csum);
701 		skb->ip_summed = CHECKSUM_NONE;
702 
703 		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
704 		if (uh->check == 0)
705 			uh->check = CSUM_MANGLED_0;
706 	}
707 }
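/*
 * For reference (a sketch of RFC 768's checksum input, not extra
 * code): the pseudo-header folded in by csum_tcpudp_magic() above
 * covers saddr, daddr, IPPROTO_UDP and the UDP length.  In the
 * single-fragment case the device completes the sum from
 * csum_start/csum_offset; with a frag_list we must finish it in
 * software, since the hardware only sees the head skb.
 */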
708 
709 static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
710 {
711 	struct sock *sk = skb->sk;
712 	struct inet_sock *inet = inet_sk(sk);
713 	struct udphdr *uh;
714 	struct rtable *rt = (struct rtable *)skb_dst(skb);
715 	int err = 0;
716 	int is_udplite = IS_UDPLITE(sk);
717 	int offset = skb_transport_offset(skb);
718 	int len = skb->len - offset;
719 	__wsum csum = 0;
720 
721 	/*
722 	 * Create a UDP header
723 	 */
724 	uh = udp_hdr(skb);
725 	uh->source = inet->inet_sport;
726 	uh->dest = dport;
727 	uh->len = htons(len);
728 	uh->check = 0;
729 
730 	if (is_udplite)  				 /*     UDP-Lite      */
731 		csum = udplite_csum(skb);
732 
733 	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
734 
735 		skb->ip_summed = CHECKSUM_NONE;
736 		goto send;
737 
738 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
739 
740 		udp4_hwcsum(skb, rt->rt_src, daddr);
741 		goto send;
742 
743 	} else
744 		csum = udp_csum(skb);
745 
746 	/* add protocol-dependent pseudo-header */
747 	uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len,
748 				      sk->sk_protocol, csum);
749 	if (uh->check == 0)
750 		uh->check = CSUM_MANGLED_0;
751 
752 send:
753 	err = ip_send_skb(skb);
754 	if (err) {
755 		if (err == -ENOBUFS && !inet->recverr) {
756 			UDP_INC_STATS_USER(sock_net(sk),
757 					   UDP_MIB_SNDBUFERRORS, is_udplite);
758 			err = 0;
759 		}
760 	} else
761 		UDP_INC_STATS_USER(sock_net(sk),
762 				   UDP_MIB_OUTDATAGRAMS, is_udplite);
763 	return err;
764 }
765 
766 /*
767  * Push out all pending data as one UDP datagram. Socket is locked.
768  */
769 static int udp_push_pending_frames(struct sock *sk)
770 {
771 	struct udp_sock  *up = udp_sk(sk);
772 	struct inet_sock *inet = inet_sk(sk);
773 	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
774 	struct sk_buff *skb;
775 	int err = 0;
776 
777 	skb = ip_finish_skb(sk);
778 	if (!skb)
779 		goto out;
780 
781 	err = udp_send_skb(skb, fl4->daddr, fl4->fl4_dport);
782 
783 out:
784 	up->len = 0;
785 	up->pending = 0;
786 	return err;
787 }
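/*
 * Usage sketch (userspace, illustrative): the pending-frame push above
 * is what terminates a sequence of sends queued with MSG_MORE:
 *
 *	send(fd, hdr, hdrlen, MSG_MORE);	// corked, kept pending
 *	send(fd, payload, paylen, 0);		// flushed as one datagram
 */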
788 
789 int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
790 		size_t len)
791 {
792 	struct inet_sock *inet = inet_sk(sk);
793 	struct udp_sock *up = udp_sk(sk);
794 	struct flowi4 *fl4;
795 	int ulen = len;
796 	struct ipcm_cookie ipc;
797 	struct rtable *rt = NULL;
798 	int free = 0;
799 	int connected = 0;
800 	__be32 daddr, faddr, saddr;
801 	__be16 dport;
802 	u8  tos;
803 	int err, is_udplite = IS_UDPLITE(sk);
804 	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
805 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
806 	struct sk_buff *skb;
807 
808 	if (len > 0xFFFF)
809 		return -EMSGSIZE;
810 
811 	/*
812 	 *	Check the flags.
813 	 */
814 
815 	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
816 		return -EOPNOTSUPP;
817 
818 	ipc.opt = NULL;
819 	ipc.tx_flags = 0;
820 
821 	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
822 
823 	if (up->pending) {
824 		/*
825 		 * There are pending frames.
826 		 * The socket lock must be held while it's corked.
827 		 */
828 		lock_sock(sk);
829 		if (likely(up->pending)) {
830 			if (unlikely(up->pending != AF_INET)) {
831 				release_sock(sk);
832 				return -EINVAL;
833 			}
834 			goto do_append_data;
835 		}
836 		release_sock(sk);
837 	}
838 	ulen += sizeof(struct udphdr);
839 
840 	/*
841 	 *	Get and verify the address.
842 	 */
843 	if (msg->msg_name) {
844 		struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
845 		if (msg->msg_namelen < sizeof(*usin))
846 			return -EINVAL;
847 		if (usin->sin_family != AF_INET) {
848 			if (usin->sin_family != AF_UNSPEC)
849 				return -EAFNOSUPPORT;
850 		}
851 
852 		daddr = usin->sin_addr.s_addr;
853 		dport = usin->sin_port;
854 		if (dport == 0)
855 			return -EINVAL;
856 	} else {
857 		if (sk->sk_state != TCP_ESTABLISHED)
858 			return -EDESTADDRREQ;
859 		daddr = inet->inet_daddr;
860 		dport = inet->inet_dport;
861 		/* Open fast path for connected socket.
862 		   Route will not be used if at least one option is set.
863 		 */
864 		connected = 1;
865 	}
866 	ipc.addr = inet->inet_saddr;
867 
868 	ipc.oif = sk->sk_bound_dev_if;
869 	err = sock_tx_timestamp(sk, &ipc.tx_flags);
870 	if (err)
871 		return err;
872 	if (msg->msg_controllen) {
873 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
874 		if (err)
875 			return err;
876 		if (ipc.opt)
877 			free = 1;
878 		connected = 0;
879 	}
880 	if (!ipc.opt)
881 		ipc.opt = inet->opt;
882 
883 	saddr = ipc.addr;
884 	ipc.addr = faddr = daddr;
885 
886 	if (ipc.opt && ipc.opt->srr) {
887 		if (!daddr)
888 			return -EINVAL;
889 		faddr = ipc.opt->faddr;
890 		connected = 0;
891 	}
892 	tos = RT_TOS(inet->tos);
893 	if (sock_flag(sk, SOCK_LOCALROUTE) ||
894 	    (msg->msg_flags & MSG_DONTROUTE) ||
895 	    (ipc.opt && ipc.opt->is_strictroute)) {
896 		tos |= RTO_ONLINK;
897 		connected = 0;
898 	}
899 
900 	if (ipv4_is_multicast(daddr)) {
901 		if (!ipc.oif)
902 			ipc.oif = inet->mc_index;
903 		if (!saddr)
904 			saddr = inet->mc_addr;
905 		connected = 0;
906 	}
907 
908 	if (connected)
909 		rt = (struct rtable *)sk_dst_check(sk, 0);
910 
911 	if (rt == NULL) {
912 		struct flowi4 fl4 = {
913 			.flowi4_oif = ipc.oif,
914 			.flowi4_mark = sk->sk_mark,
915 			.daddr = faddr,
916 			.saddr = saddr,
917 			.flowi4_tos = tos,
918 			.flowi4_proto = sk->sk_protocol,
919 			.flowi4_flags = (inet_sk_flowi_flags(sk) |
920 					 FLOWI_FLAG_CAN_SLEEP),
921 			.fl4_sport = inet->inet_sport,
922 			.fl4_dport = dport,
923 		};
924 		struct net *net = sock_net(sk);
925 
926 		security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
927 		rt = ip_route_output_flow(net, &fl4, sk);
928 		if (IS_ERR(rt)) {
929 			err = PTR_ERR(rt);
930 			rt = NULL;
931 			if (err == -ENETUNREACH)
932 				IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
933 			goto out;
934 		}
935 
936 		err = -EACCES;
937 		if ((rt->rt_flags & RTCF_BROADCAST) &&
938 		    !sock_flag(sk, SOCK_BROADCAST))
939 			goto out;
940 		if (connected)
941 			sk_dst_set(sk, dst_clone(&rt->dst));
942 	}
943 
944 	if (msg->msg_flags&MSG_CONFIRM)
945 		goto do_confirm;
946 back_from_confirm:
947 
948 	saddr = rt->rt_src;
949 	if (!ipc.addr)
950 		daddr = ipc.addr = rt->rt_dst;
951 
952 	/* Lockless fast path for the non-corking case. */
953 	if (!corkreq) {
954 		skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
955 				  sizeof(struct udphdr), &ipc, &rt,
956 				  msg->msg_flags);
957 		err = PTR_ERR(skb);
958 		if (skb && !IS_ERR(skb))
959 			err = udp_send_skb(skb, daddr, dport);
960 		goto out;
961 	}
962 
963 	lock_sock(sk);
964 	if (unlikely(up->pending)) {
965 		/* The socket is already corked while preparing it. */
966 		/* ... which is an evident application bug. --ANK */
967 		release_sock(sk);
968 
969 		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
970 		err = -EINVAL;
971 		goto out;
972 	}
973 	/*
974 	 *	Now cork the socket to pend data.
975 	 */
976 	fl4 = &inet->cork.fl.u.ip4;
977 	fl4->daddr = daddr;
978 	fl4->saddr = saddr;
979 	fl4->fl4_dport = dport;
980 	fl4->fl4_sport = inet->inet_sport;
981 	up->pending = AF_INET;
982 
983 do_append_data:
984 	up->len += ulen;
985 	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
986 			sizeof(struct udphdr), &ipc, &rt,
987 			corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
988 	if (err)
989 		udp_flush_pending_frames(sk);
990 	else if (!corkreq)
991 		err = udp_push_pending_frames(sk);
992 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
993 		up->pending = 0;
994 	release_sock(sk);
995 
996 out:
997 	ip_rt_put(rt);
998 	if (free)
999 		kfree(ipc.opt);
1000 	if (!err)
1001 		return len;
1002 	/*
1003 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1004 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1005 	 * we don't have a good statistic (IpOutDiscards but it can be too many
1006 	 * things).  We could add another new stat but at least for now that
1007 	 * seems like overkill.
1008 	 */
1009 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1010 		UDP_INC_STATS_USER(sock_net(sk),
1011 				UDP_MIB_SNDBUFERRORS, is_udplite);
1012 	}
1013 	return err;
1014 
1015 do_confirm:
1016 	dst_confirm(&rt->dst);
1017 	if (!(msg->msg_flags&MSG_PROBE) || len)
1018 		goto back_from_confirm;
1019 	err = 0;
1020 	goto out;
1021 }
1022 EXPORT_SYMBOL(udp_sendmsg);
1023 
1024 int udp_sendpage(struct sock *sk, struct page *page, int offset,
1025 		 size_t size, int flags)
1026 {
1027 	struct udp_sock *up = udp_sk(sk);
1028 	int ret;
1029 
1030 	if (!up->pending) {
1031 		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };
1032 
1033 		/* Call udp_sendmsg to specify destination address which
1034 		 * sendpage interface can't pass.
1035 		 * This will succeed only when the socket is connected.
1036 		 */
1037 		ret = udp_sendmsg(NULL, sk, &msg, 0);
1038 		if (ret < 0)
1039 			return ret;
1040 	}
1041 
1042 	lock_sock(sk);
1043 
1044 	if (unlikely(!up->pending)) {
1045 		release_sock(sk);
1046 
1047 		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
1048 		return -EINVAL;
1049 	}
1050 
1051 	ret = ip_append_page(sk, page, offset, size, flags);
1052 	if (ret == -EOPNOTSUPP) {
1053 		release_sock(sk);
1054 		return sock_no_sendpage(sk->sk_socket, page, offset,
1055 					size, flags);
1056 	}
1057 	if (ret < 0) {
1058 		udp_flush_pending_frames(sk);
1059 		goto out;
1060 	}
1061 
1062 	up->len += size;
1063 	if (!(up->corkflag || (flags&MSG_MORE)))
1064 		ret = udp_push_pending_frames(sk);
1065 	if (!ret)
1066 		ret = size;
1067 out:
1068 	release_sock(sk);
1069 	return ret;
1070 }
1071 
1072 
1073 /**
1074  *	first_packet_length	- return length of first packet in receive queue
1075  *	@sk: socket
1076  *
1077  *	Drops all frames with bad checksums until a valid one is found.
1078  *	Returns the length of the found skb, or 0 if none is found.
1079  */
1080 static unsigned int first_packet_length(struct sock *sk)
1081 {
1082 	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
1083 	struct sk_buff *skb;
1084 	unsigned int res;
1085 
1086 	__skb_queue_head_init(&list_kill);
1087 
1088 	spin_lock_bh(&rcvq->lock);
1089 	while ((skb = skb_peek(rcvq)) != NULL &&
1090 		udp_lib_checksum_complete(skb)) {
1091 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1092 				 IS_UDPLITE(sk));
1093 		atomic_inc(&sk->sk_drops);
1094 		__skb_unlink(skb, rcvq);
1095 		__skb_queue_tail(&list_kill, skb);
1096 	}
1097 	res = skb ? skb->len : 0;
1098 	spin_unlock_bh(&rcvq->lock);
1099 
1100 	if (!skb_queue_empty(&list_kill)) {
1101 		bool slow = lock_sock_fast(sk);
1102 
1103 		__skb_queue_purge(&list_kill);
1104 		sk_mem_reclaim_partial(sk);
1105 		unlock_sock_fast(sk, slow);
1106 	}
1107 	return res;
1108 }
1109 
1110 /*
1111  *	IOCTL requests applicable to the UDP protocol
1112  */
1113 
1114 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1115 {
1116 	switch (cmd) {
1117 	case SIOCOUTQ:
1118 	{
1119 		int amount = sk_wmem_alloc_get(sk);
1120 
1121 		return put_user(amount, (int __user *)arg);
1122 	}
1123 
1124 	case SIOCINQ:
1125 	{
1126 		unsigned int amount = first_packet_length(sk);
1127 
1128 		if (amount)
1129 			/*
1130 			 * We will only return the amount
1131 			 * of this packet since that is all
1132 			 * that will be read.
1133 			 */
1134 			amount -= sizeof(struct udphdr);
1135 
1136 		return put_user(amount, (int __user *)arg);
1137 	}
1138 
1139 	default:
1140 		return -ENOIOCTLCMD;
1141 	}
1142 
1143 	return 0;
1144 }
1145 EXPORT_SYMBOL(udp_ioctl);
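/*
 * Usage sketch (userspace, illustrative): SIOCINQ (a.k.a. FIONREAD) on
 * a UDP socket reports the payload length of the first queued datagram
 * only, not the total number of queued bytes:
 *
 *	int avail = 0;
 *	ioctl(fd, SIOCINQ, &avail);
 */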
1146 
1147 /*
1148  * 	This should be easy, if there is something there we
1149  * 	return it, otherwise we block.
1150  */
1151 
1152 int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1153 		size_t len, int noblock, int flags, int *addr_len)
1154 {
1155 	struct inet_sock *inet = inet_sk(sk);
1156 	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1157 	struct sk_buff *skb;
1158 	unsigned int ulen;
1159 	int peeked;
1160 	int err;
1161 	int is_udplite = IS_UDPLITE(sk);
1162 	bool slow;
1163 
1164 	/*
1165 	 *	Check any passed addresses
1166 	 */
1167 	if (addr_len)
1168 		*addr_len = sizeof(*sin);
1169 
1170 	if (flags & MSG_ERRQUEUE)
1171 		return ip_recv_error(sk, msg, len);
1172 
1173 try_again:
1174 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
1175 				  &peeked, &err);
1176 	if (!skb)
1177 		goto out;
1178 
1179 	ulen = skb->len - sizeof(struct udphdr);
1180 	if (len > ulen)
1181 		len = ulen;
1182 	else if (len < ulen)
1183 		msg->msg_flags |= MSG_TRUNC;
1184 
1185 	/*
1186 	 * If checksum is needed at all, try to do it while copying the
1187 	 * data.  If the data is truncated, or if we only want a partial
1188 	 * coverage checksum (UDP-Lite), do it before the copy.
1189 	 */
1190 
1191 	if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
1192 		if (udp_lib_checksum_complete(skb))
1193 			goto csum_copy_err;
1194 	}
1195 
1196 	if (skb_csum_unnecessary(skb))
1197 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
1198 					      msg->msg_iov, len);
1199 	else {
1200 		err = skb_copy_and_csum_datagram_iovec(skb,
1201 						       sizeof(struct udphdr),
1202 						       msg->msg_iov);
1203 
1204 		if (err == -EINVAL)
1205 			goto csum_copy_err;
1206 	}
1207 
1208 	if (err)
1209 		goto out_free;
1210 
1211 	if (!peeked)
1212 		UDP_INC_STATS_USER(sock_net(sk),
1213 				UDP_MIB_INDATAGRAMS, is_udplite);
1214 
1215 	sock_recv_ts_and_drops(msg, sk, skb);
1216 
1217 	/* Copy the address. */
1218 	if (sin) {
1219 		sin->sin_family = AF_INET;
1220 		sin->sin_port = udp_hdr(skb)->source;
1221 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
1222 		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
1223 	}
1224 	if (inet->cmsg_flags)
1225 		ip_cmsg_recv(msg, skb);
1226 
1227 	err = len;
1228 	if (flags & MSG_TRUNC)
1229 		err = ulen;
1230 
1231 out_free:
1232 	skb_free_datagram_locked(sk, skb);
1233 out:
1234 	return err;
1235 
1236 csum_copy_err:
1237 	slow = lock_sock_fast(sk);
1238 	if (!skb_kill_datagram(sk, skb, flags))
1239 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1240 	unlock_sock_fast(sk, slow);
1241 
1242 	if (noblock)
1243 		return -EAGAIN;
1244 	goto try_again;
1245 }
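/*
 * Usage sketch (userspace, illustrative): as implemented above,
 * MSG_TRUNC makes recv() return the full datagram length even when
 * the buffer was smaller, so callers can detect truncation:
 *
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	if (n > (ssize_t)sizeof(buf))
 *		;	// datagram was truncated to sizeof(buf) bytes
 */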
1246 
1247 
1248 int udp_disconnect(struct sock *sk, int flags)
1249 {
1250 	struct inet_sock *inet = inet_sk(sk);
1251 	/*
1252 	 *	1003.1g - break association.
1253 	 */
1254 
1255 	sk->sk_state = TCP_CLOSE;
1256 	inet->inet_daddr = 0;
1257 	inet->inet_dport = 0;
1258 	sock_rps_save_rxhash(sk, 0);
1259 	sk->sk_bound_dev_if = 0;
1260 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1261 		inet_reset_saddr(sk);
1262 
1263 	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
1264 		sk->sk_prot->unhash(sk);
1265 		inet->inet_sport = 0;
1266 	}
1267 	sk_dst_reset(sk);
1268 	return 0;
1269 }
1270 EXPORT_SYMBOL(udp_disconnect);
1271 
1272 void udp_lib_unhash(struct sock *sk)
1273 {
1274 	if (sk_hashed(sk)) {
1275 		struct udp_table *udptable = sk->sk_prot->h.udp_table;
1276 		struct udp_hslot *hslot, *hslot2;
1277 
1278 		hslot  = udp_hashslot(udptable, sock_net(sk),
1279 				      udp_sk(sk)->udp_port_hash);
1280 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1281 
1282 		spin_lock_bh(&hslot->lock);
1283 		if (sk_nulls_del_node_init_rcu(sk)) {
1284 			hslot->count--;
1285 			inet_sk(sk)->inet_num = 0;
1286 			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
1287 
1288 			spin_lock(&hslot2->lock);
1289 			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1290 			hslot2->count--;
1291 			spin_unlock(&hslot2->lock);
1292 		}
1293 		spin_unlock_bh(&hslot->lock);
1294 	}
1295 }
1296 EXPORT_SYMBOL(udp_lib_unhash);
1297 
1298 /*
1299  * inet_rcv_saddr has changed; we must rehash the secondary hash
1300  */
1301 void udp_lib_rehash(struct sock *sk, u16 newhash)
1302 {
1303 	if (sk_hashed(sk)) {
1304 		struct udp_table *udptable = sk->sk_prot->h.udp_table;
1305 		struct udp_hslot *hslot, *hslot2, *nhslot2;
1306 
1307 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1308 		nhslot2 = udp_hashslot2(udptable, newhash);
1309 		udp_sk(sk)->udp_portaddr_hash = newhash;
1310 		if (hslot2 != nhslot2) {
1311 			hslot = udp_hashslot(udptable, sock_net(sk),
1312 					     udp_sk(sk)->udp_port_hash);
1313 			/* we must lock primary chain too */
1314 			spin_lock_bh(&hslot->lock);
1315 
1316 			spin_lock(&hslot2->lock);
1317 			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1318 			hslot2->count--;
1319 			spin_unlock(&hslot2->lock);
1320 
1321 			spin_lock(&nhslot2->lock);
1322 			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
1323 						 &nhslot2->head);
1324 			nhslot2->count++;
1325 			spin_unlock(&nhslot2->lock);
1326 
1327 			spin_unlock_bh(&hslot->lock);
1328 		}
1329 	}
1330 }
1331 EXPORT_SYMBOL(udp_lib_rehash);
1332 
1333 static void udp_v4_rehash(struct sock *sk)
1334 {
1335 	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
1336 					  inet_sk(sk)->inet_rcv_saddr,
1337 					  inet_sk(sk)->inet_num);
1338 	udp_lib_rehash(sk, new_hash);
1339 }
1340 
1341 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1342 {
1343 	int rc;
1344 
1345 	if (inet_sk(sk)->inet_daddr)
1346 		sock_rps_save_rxhash(sk, skb->rxhash);
1347 
1348 	rc = ip_queue_rcv_skb(sk, skb);
1349 	if (rc < 0) {
1350 		int is_udplite = IS_UDPLITE(sk);
1351 
1352 		/* Note that an ENOMEM error is charged twice */
1353 		if (rc == -ENOMEM)
1354 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1355 					 is_udplite);
1356 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1357 		kfree_skb(skb);
1358 		return -1;
1359 	}
1360 
1361 	return 0;
1362 
1363 }
1364 
1365 /* returns:
1366  *  -1: error
1367  *   0: success
1368  *  >0: "udp encap" protocol resubmission
1369  *
1370  * Note that in the success and error cases, the skb is assumed to
1371  * have either been requeued or freed.
1372  */
1373 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1374 {
1375 	struct udp_sock *up = udp_sk(sk);
1376 	int rc;
1377 	int is_udplite = IS_UDPLITE(sk);
1378 
1379 	/*
1380 	 *	Charge it to the socket, dropping if the queue is full.
1381 	 */
1382 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1383 		goto drop;
1384 	nf_reset(skb);
1385 
1386 	if (up->encap_type) {
1387 		/*
1388 		 * This is an encapsulation socket so pass the skb to
1389 		 * the socket's udp_encap_rcv() hook. Otherwise, just
1390 		 * fall through and pass this up the UDP socket.
1391 		 * up->encap_rcv() returns the following value:
1392 		 * =0 if skb was successfully passed to the encap
1393 		 *    handler or was discarded by it.
1394 		 * >0 if skb should be passed on to UDP.
1395 		 * <0 if skb should be resubmitted as proto -N
1396 		 */
1397 
1398 		/* if we're overly short, let UDP handle it */
1399 		if (skb->len > sizeof(struct udphdr) &&
1400 		    up->encap_rcv != NULL) {
1401 			int ret;
1402 
1403 			ret = (*up->encap_rcv)(sk, skb);
1404 			if (ret <= 0) {
1405 				UDP_INC_STATS_BH(sock_net(sk),
1406 						 UDP_MIB_INDATAGRAMS,
1407 						 is_udplite);
1408 				return -ret;
1409 			}
1410 		}
1411 
1412 		/* FALLTHROUGH -- it's a UDP Packet */
1413 	}
1414 
1415 	/*
1416 	 * 	UDP-Lite specific tests, ignored on UDP sockets
1417 	 */
1418 	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
1419 
1420 		/*
1421 		 * MIB statistics other than incrementing the error count are
1422 		 * disabled for the following two types of errors: these depend
1423 		 * on the application settings, not on the functioning of the
1424 		 * protocol stack as such.
1425 		 *
1426 		 * RFC 3828 here recommends (sec 3.3): "There should also be a
1427 		 * way ... to ... at least let the receiving application block
1428 		 * delivery of packets with coverage values less than a value
1429 		 * provided by the application."
1430 		 */
1431 		if (up->pcrlen == 0) {          /* full coverage was set  */
1432 			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
1433 				"%d while full coverage %d requested\n",
1434 				UDP_SKB_CB(skb)->cscov, skb->len);
1435 			goto drop;
1436 		}
1437 		/* The next case involves violating the min. coverage requested
1438 		 * by the receiver. This is subtle: if receiver wants x and x is
1439 		 * greater than the buffersize/MTU then receiver will complain
1440 		 * that it wants x while sender emits packets of smaller size y.
1441 		 * Therefore the above ...()->partial_cov statement is essential.
1442 		 */
1443 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
1444 			LIMIT_NETDEBUG(KERN_WARNING
1445 				"UDPLITE: coverage %d too small, need min %d\n",
1446 				UDP_SKB_CB(skb)->cscov, up->pcrlen);
1447 			goto drop;
1448 		}
1449 	}
1450 
1451 	if (rcu_dereference_raw(sk->sk_filter)) {
1452 		if (udp_lib_checksum_complete(skb))
1453 			goto drop;
1454 	}
1455 
1456 
1457 	if (sk_rcvqueues_full(sk, skb))
1458 		goto drop;
1459 
1460 	rc = 0;
1461 
1462 	bh_lock_sock(sk);
1463 	if (!sock_owned_by_user(sk))
1464 		rc = __udp_queue_rcv_skb(sk, skb);
1465 	else if (sk_add_backlog(sk, skb)) {
1466 		bh_unlock_sock(sk);
1467 		goto drop;
1468 	}
1469 	bh_unlock_sock(sk);
1470 
1471 	return rc;
1472 
1473 drop:
1474 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1475 	atomic_inc(&sk->sk_drops);
1476 	kfree_skb(skb);
1477 	return -1;
1478 }
1479 
1480 
1481 static void flush_stack(struct sock **stack, unsigned int count,
1482 			struct sk_buff *skb, unsigned int final)
1483 {
1484 	unsigned int i;
1485 	struct sk_buff *skb1 = NULL;
1486 	struct sock *sk;
1487 
1488 	for (i = 0; i < count; i++) {
1489 		sk = stack[i];
1490 		if (likely(skb1 == NULL))
1491 			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
1492 
1493 		if (!skb1) {
1494 			atomic_inc(&sk->sk_drops);
1495 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1496 					 IS_UDPLITE(sk));
1497 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1498 					 IS_UDPLITE(sk));
1499 		}
1500 
1501 		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
1502 			skb1 = NULL;
1503 	}
1504 	if (unlikely(skb1))
1505 		kfree_skb(skb1);
1506 }
1507 
1508 /*
1509  *	Multicasts and broadcasts go to each listener.
1510  *
1511  *	Note: called only from the BH handler context.
1512  */
1513 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1514 				    struct udphdr  *uh,
1515 				    __be32 saddr, __be32 daddr,
1516 				    struct udp_table *udptable)
1517 {
1518 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
1519 	struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
1520 	int dif;
1521 	unsigned int i, count = 0;
1522 
1523 	spin_lock(&hslot->lock);
1524 	sk = sk_nulls_head(&hslot->head);
1525 	dif = skb->dev->ifindex;
1526 	sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
1527 	while (sk) {
1528 		stack[count++] = sk;
1529 		sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
1530 				       daddr, uh->source, saddr, dif);
1531 		if (unlikely(count == ARRAY_SIZE(stack))) {
1532 			if (!sk)
1533 				break;
1534 			flush_stack(stack, count, skb, ~0);
1535 			count = 0;
1536 		}
1537 	}
1538 	/*
1539 	 * before releasing chain lock, we must take a reference on sockets
1540 	 */
1541 	for (i = 0; i < count; i++)
1542 		sock_hold(stack[i]);
1543 
1544 	spin_unlock(&hslot->lock);
1545 
1546 	/*
1547 	 * do the slow work with no lock held
1548 	 */
1549 	if (count) {
1550 		flush_stack(stack, count, skb, count - 1);
1551 
1552 		for (i = 0; i < count; i++)
1553 			sock_put(stack[i]);
1554 	} else {
1555 		kfree_skb(skb);
1556 	}
1557 	return 0;
1558 }
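/*
 * Sizing note (a sketch): the on-stack array above holds
 * 256 / sizeof(struct sock *) entries -- 32 sockets on a 64-bit build,
 * 64 on 32-bit.  When more listeners match, the batch is flushed with
 * flush_stack() mid-walk and the array is reused, so stack usage stays
 * bounded at 256 bytes no matter how many sockets joined the group.
 */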
1559 
1560 /* Initialize the UDP checksum. If we exit with zero (success),
1561  * CHECKSUM_UNNECESSARY means that no more checks are required.
1562  * Otherwise, csum completion requires checksumming the packet body,
1563  * including the UDP header, and folding it into skb->csum.
1564  */
1565 static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1566 				 int proto)
1567 {
1568 	const struct iphdr *iph;
1569 	int err;
1570 
1571 	UDP_SKB_CB(skb)->partial_cov = 0;
1572 	UDP_SKB_CB(skb)->cscov = skb->len;
1573 
1574 	if (proto == IPPROTO_UDPLITE) {
1575 		err = udplite_checksum_init(skb, uh);
1576 		if (err)
1577 			return err;
1578 	}
1579 
1580 	iph = ip_hdr(skb);
1581 	if (uh->check == 0) {
1582 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
1584 		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
1585 				      proto, skb->csum))
1586 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1587 	}
1588 	if (!skb_csum_unnecessary(skb))
1589 		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1590 					       skb->len, proto, 0);
1591 	/* Probably, we should checksum udp header (it should be in cache
1592 	 * in any case) and data in tiny packets (< rx copybreak).
1593 	 */
1594 
1595 	return 0;
1596 }
1597 
1598 /*
1599  *	All we need to do is get the socket, and then do a checksum.
1600  */
1601 
1602 int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1603 		   int proto)
1604 {
1605 	struct sock *sk;
1606 	struct udphdr *uh;
1607 	unsigned short ulen;
1608 	struct rtable *rt = skb_rtable(skb);
1609 	__be32 saddr, daddr;
1610 	struct net *net = dev_net(skb->dev);
1611 
1612 	/*
1613 	 *  Validate the packet.
1614 	 */
1615 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1616 		goto drop;		/* No space for header. */
1617 
1618 	uh   = udp_hdr(skb);
1619 	ulen = ntohs(uh->len);
1620 	saddr = ip_hdr(skb)->saddr;
1621 	daddr = ip_hdr(skb)->daddr;
1622 
1623 	if (ulen > skb->len)
1624 		goto short_packet;
1625 
1626 	if (proto == IPPROTO_UDP) {
1627 		/* UDP validates ulen. */
1628 		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
1629 			goto short_packet;
1630 		uh = udp_hdr(skb);
1631 	}
1632 
1633 	if (udp4_csum_init(skb, uh, proto))
1634 		goto csum_error;
1635 
1636 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1637 		return __udp4_lib_mcast_deliver(net, skb, uh,
1638 				saddr, daddr, udptable);
1639 
1640 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1641 
1642 	if (sk != NULL) {
1643 		int ret = udp_queue_rcv_skb(sk, skb);
1644 		sock_put(sk);
1645 
1646 		/* a return value > 0 means to resubmit the input, but
1647 		 * it wants the return to be -protocol, or 0
1648 		 */
1649 		if (ret > 0)
1650 			return -ret;
1651 		return 0;
1652 	}
1653 
1654 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1655 		goto drop;
1656 	nf_reset(skb);
1657 
1658 	/* No socket. Drop packet silently, if checksum is wrong */
1659 	if (udp_lib_checksum_complete(skb))
1660 		goto csum_error;
1661 
1662 	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1663 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1664 
1665 	/*
1666 	 * Hmm.  We got a UDP packet sent to a port on which we
1667 	 * aren't listening.  Ignore it.
1668 	 */
1669 	kfree_skb(skb);
1670 	return 0;
1671 
1672 short_packet:
1673 	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
1674 		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
1675 		       &saddr,
1676 		       ntohs(uh->source),
1677 		       ulen,
1678 		       skb->len,
1679 		       &daddr,
1680 		       ntohs(uh->dest));
1681 	goto drop;
1682 
1683 csum_error:
1684 	/*
1685 	 * RFC1122: OK.  Discards the bad packet silently (as far as
1686 	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1687 	 */
1688 	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
1689 		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
1690 		       &saddr,
1691 		       ntohs(uh->source),
1692 		       &daddr,
1693 		       ntohs(uh->dest),
1694 		       ulen);
1695 drop:
1696 	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1697 	kfree_skb(skb);
1698 	return 0;
1699 }
1700 
1701 int udp_rcv(struct sk_buff *skb)
1702 {
1703 	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1704 }
1705 
1706 void udp_destroy_sock(struct sock *sk)
1707 {
1708 	bool slow = lock_sock_fast(sk);
1709 	udp_flush_pending_frames(sk);
1710 	unlock_sock_fast(sk, slow);
1711 }
1712 
1713 /*
1714  *	Socket option code for UDP
1715  */
1716 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1717 		       char __user *optval, unsigned int optlen,
1718 		       int (*push_pending_frames)(struct sock *))
1719 {
1720 	struct udp_sock *up = udp_sk(sk);
1721 	int val;
1722 	int err = 0;
1723 	int is_udplite = IS_UDPLITE(sk);
1724 
1725 	if (optlen < sizeof(int))
1726 		return -EINVAL;
1727 
1728 	if (get_user(val, (int __user *)optval))
1729 		return -EFAULT;
1730 
1731 	switch (optname) {
1732 	case UDP_CORK:
1733 		if (val != 0) {
1734 			up->corkflag = 1;
1735 		} else {
1736 			up->corkflag = 0;
1737 			lock_sock(sk);
1738 			(*push_pending_frames)(sk);
1739 			release_sock(sk);
1740 		}
1741 		break;
1742 
1743 	case UDP_ENCAP:
1744 		switch (val) {
1745 		case 0:
1746 		case UDP_ENCAP_ESPINUDP:
1747 		case UDP_ENCAP_ESPINUDP_NON_IKE:
1748 			up->encap_rcv = xfrm4_udp_encap_rcv;
1749 			/* FALLTHROUGH */
1750 		case UDP_ENCAP_L2TPINUDP:
1751 			up->encap_type = val;
1752 			break;
1753 		default:
1754 			err = -ENOPROTOOPT;
1755 			break;
1756 		}
1757 		break;
1758 
1759 	/*
1760 	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
1761 	 */
1762 	/* The sender sets actual checksum coverage length via this option.
1763 	 * The case coverage > packet length is handled by send module. */
1764 	case UDPLITE_SEND_CSCOV:
1765 		if (!is_udplite)         /* Disable the option on UDP sockets */
1766 			return -ENOPROTOOPT;
1767 		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
1768 			val = 8;
1769 		else if (val > USHRT_MAX)
1770 			val = USHRT_MAX;
1771 		up->pcslen = val;
1772 		up->pcflag |= UDPLITE_SEND_CC;
1773 		break;
1774 
1775 	/* The receiver specifies a minimum checksum coverage value. To make
1776 	 * sense, this should be set to at least 8 (as done below). If zero is
1777 	 * used, this again means full checksum coverage.                     */
1778 	case UDPLITE_RECV_CSCOV:
1779 		if (!is_udplite)         /* Disable the option on UDP sockets */
1780 			return -ENOPROTOOPT;
1781 		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
1782 			val = 8;
1783 		else if (val > USHRT_MAX)
1784 			val = USHRT_MAX;
1785 		up->pcrlen = val;
1786 		up->pcflag |= UDPLITE_RECV_CC;
1787 		break;
1788 
1789 	default:
1790 		err = -ENOPROTOOPT;
1791 		break;
1792 	}
1793 
1794 	return err;
1795 }
1796 EXPORT_SYMBOL(udp_lib_setsockopt);
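/*
 * Usage sketch (userspace, illustrative): a UDP-Lite sender that wants
 * the checksum to cover its 8-byte header plus 12 bytes of payload
 * (RFC 3828 coverage values include the header) would do:
 *
 *	int cov = 20;
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *
 * and the corking option handled above maps to:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_UDP, UDP_CORK, &on, sizeof(on));
 */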
1797 
1798 int udp_setsockopt(struct sock *sk, int level, int optname,
1799 		   char __user *optval, unsigned int optlen)
1800 {
1801 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1802 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1803 					  udp_push_pending_frames);
1804 	return ip_setsockopt(sk, level, optname, optval, optlen);
1805 }
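/*
 * Example: UDP-Lite partial checksum coverage from user space (a
 * hedged sketch of the UDPLITE_*_CSCOV cases handled above). A
 * coverage of 20 checksums the 8-byte UDP-Lite header plus the first
 * 12 payload bytes; on a plain UDP socket both calls fail with
 * ENOPROTOOPT, matching the is_udplite checks:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	#ifndef IPPROTO_UDPLITE
 *	#define IPPROTO_UDPLITE	136
 *	#endif
 *	#define UDPLITE_SEND_CSCOV	10	// values from <linux/udp.h>
 *	#define UDPLITE_RECV_CSCOV	11
 *
 *	int cov = 20;
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */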
1806 
1807 #ifdef CONFIG_COMPAT
1808 int compat_udp_setsockopt(struct sock *sk, int level, int optname,
1809 			  char __user *optval, unsigned int optlen)
1810 {
1811 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1812 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1813 					  udp_push_pending_frames);
1814 	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
1815 }
1816 #endif
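/*
 * Example: enabling L2TP encapsulation on a UDP tunnel socket (a
 * hedged sketch of the UDP_ENCAP case above; this is roughly what an
 * L2TP daemon does on its tunnel fd). Unknown values are rejected with
 * ENOPROTOOPT, and the ESP-in-UDP types additionally install
 * xfrm4_udp_encap_rcv as the encap_rcv hook:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>	// UDP_ENCAP, UDP_ENCAP_L2TPINUDP
 *	#include <sys/socket.h>
 *
 *	int type = UDP_ENCAP_L2TPINUDP;
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 */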
1817 
1818 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
1819 		       char __user *optval, int __user *optlen)
1820 {
1821 	struct udp_sock *up = udp_sk(sk);
1822 	int val, len;
1823 
1824 	if (get_user(len, optlen))
1825 		return -EFAULT;
1826 
1827 	if (len < 0)		/* must be checked before the unsigned clamp */
1828 		return -EINVAL;
1829 
1830 	len = min_t(unsigned int, len, sizeof(int));
1831 
1832 	switch (optname) {
1833 	case UDP_CORK:
1834 		val = up->corkflag;
1835 		break;
1836 
1837 	case UDP_ENCAP:
1838 		val = up->encap_type;
1839 		break;
1840 
1841 	/* The following two cannot be changed on UDP sockets; there the
1842 	 * return is always 0, which corresponds to full checksum coverage. */
1843 	case UDPLITE_SEND_CSCOV:
1844 		val = up->pcslen;
1845 		break;
1846 
1847 	case UDPLITE_RECV_CSCOV:
1848 		val = up->pcrlen;
1849 		break;
1850 
1851 	default:
1852 		return -ENOPROTOOPT;
1853 	}
1854 
1855 	if (put_user(len, optlen))
1856 		return -EFAULT;
1857 	if (copy_to_user(optval, &val, len))
1858 		return -EFAULT;
1859 	return 0;
1860 }
1861 EXPORT_SYMBOL(udp_lib_getsockopt);
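/*
 * Example: reading an option back from user space (hedged sketch; fd
 * is a placeholder UDP socket). Note that len is clamped to
 * sizeof(int) above, so a larger caller buffer still yields a 4-byte
 * result:
 *
 *	#include <stdio.h>
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	int val = 0;
 *	socklen_t len = sizeof(val);
 *
 *	if (getsockopt(fd, IPPROTO_UDP, UDP_CORK, &val, &len) == 0)
 *		printf("corked: %d (len %u)\n", val, (unsigned)len);
 */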
1862 
1863 int udp_getsockopt(struct sock *sk, int level, int optname,
1864 		   char __user *optval, int __user *optlen)
1865 {
1866 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1867 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1868 	return ip_getsockopt(sk, level, optname, optval, optlen);
1869 }
1870 
1871 #ifdef CONFIG_COMPAT
1872 int compat_udp_getsockopt(struct sock *sk, int level, int optname,
1873 				 char __user *optval, int __user *optlen)
1874 {
1875 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1876 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1877 	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
1878 }
1879 #endif
1880 /**
1881  * 	udp_poll - wait for a UDP event.
1882  *	@file: file struct
1883  *	@sock: socket
1884  *	@wait: poll table
1885  *
1886  *	This is the same as datagram poll, except for the special case of
1887  *	blocking sockets. If an application is using a blocking fd
1888  *	and a packet with a checksum error is in the queue,
1889  *	select could indicate that data is available even though a
1890  *	subsequent read would block. Add special case code
1891  *	to work around these arguably broken applications.
1892  */
1893 unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1894 {
1895 	unsigned int mask = datagram_poll(file, sock, wait);
1896 	struct sock *sk = sock->sk;
1897 
1898 	/* Check for false positives due to checksum errors */
1899 	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
1900 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
1901 		mask &= ~(POLLIN | POLLRDNORM);
1902 
1903 	return mask;
1904 
1905 }
1906 EXPORT_SYMBOL(udp_poll);
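/*
 * Example of the situation the first_packet_length() recheck guards
 * against (a hedged user-space sketch; fd and buf are placeholders).
 * Without the workaround, a queued datagram whose checksum later
 * proves bad could make poll() report readability and then leave a
 * blocking recv() stuck:
 *
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);			// may wake for a corrupt datagram
 *	if (pfd.revents & POLLIN)
 *		recv(fd, buf, sizeof(buf), 0);	// could block forever without
 *						// the recheck above
 */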
1907 
1908 struct proto udp_prot = {
1909 	.name		   = "UDP",
1910 	.owner		   = THIS_MODULE,
1911 	.close		   = udp_lib_close,
1912 	.connect	   = ip4_datagram_connect,
1913 	.disconnect	   = udp_disconnect,
1914 	.ioctl		   = udp_ioctl,
1915 	.destroy	   = udp_destroy_sock,
1916 	.setsockopt	   = udp_setsockopt,
1917 	.getsockopt	   = udp_getsockopt,
1918 	.sendmsg	   = udp_sendmsg,
1919 	.recvmsg	   = udp_recvmsg,
1920 	.sendpage	   = udp_sendpage,
1921 	.backlog_rcv	   = __udp_queue_rcv_skb,
1922 	.hash		   = udp_lib_hash,
1923 	.unhash		   = udp_lib_unhash,
1924 	.rehash		   = udp_v4_rehash,
1925 	.get_port	   = udp_v4_get_port,
1926 	.memory_allocated  = &udp_memory_allocated,
1927 	.sysctl_mem	   = sysctl_udp_mem,
1928 	.sysctl_wmem	   = &sysctl_udp_wmem_min,
1929 	.sysctl_rmem	   = &sysctl_udp_rmem_min,
1930 	.obj_size	   = sizeof(struct udp_sock),
1931 	.slab_flags	   = SLAB_DESTROY_BY_RCU,
1932 	.h.udp_table	   = &udp_table,
1933 #ifdef CONFIG_COMPAT
1934 	.compat_setsockopt = compat_udp_setsockopt,
1935 	.compat_getsockopt = compat_udp_getsockopt,
1936 #endif
1937 	.clear_sk	   = sk_prot_clear_portaddr_nulls,
1938 };
1939 EXPORT_SYMBOL(udp_prot);
1940 
1941 /* ------------------------------------------------------------------------ */
1942 #ifdef CONFIG_PROC_FS
1943 
1944 static struct sock *udp_get_first(struct seq_file *seq, int start)
1945 {
1946 	struct sock *sk;
1947 	struct udp_iter_state *state = seq->private;
1948 	struct net *net = seq_file_net(seq);
1949 
1950 	for (state->bucket = start; state->bucket <= state->udp_table->mask;
1951 	     ++state->bucket) {
1952 		struct hlist_nulls_node *node;
1953 		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
1954 
1955 		if (hlist_nulls_empty(&hslot->head))
1956 			continue;
1957 
1958 		spin_lock_bh(&hslot->lock);
1959 		sk_nulls_for_each(sk, node, &hslot->head) {
1960 			if (!net_eq(sock_net(sk), net))
1961 				continue;
1962 			if (sk->sk_family == state->family)
1963 				goto found;
1964 		}
1965 		spin_unlock_bh(&hslot->lock);
1966 	}
1967 	sk = NULL;
1968 found:
1969 	return sk;
1970 }
1971 
1972 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
1973 {
1974 	struct udp_iter_state *state = seq->private;
1975 	struct net *net = seq_file_net(seq);
1976 
1977 	do {
1978 		sk = sk_nulls_next(sk);
1979 	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
1980 
1981 	if (!sk) {
1982 		if (state->bucket <= state->udp_table->mask)
1983 			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
1984 		return udp_get_first(seq, state->bucket + 1);
1985 	}
1986 	return sk;
1987 }
1988 
1989 static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
1990 {
1991 	struct sock *sk = udp_get_first(seq, 0);
1992 
1993 	if (sk)
1994 		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
1995 			--pos;
1996 	return pos ? NULL : sk;
1997 }
1998 
1999 static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
2000 {
2001 	struct udp_iter_state *state = seq->private;
2002 	state->bucket = MAX_UDP_PORTS;
2003 
2004 	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
2005 }
2006 
2007 static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2008 {
2009 	struct sock *sk;
2010 
2011 	if (v == SEQ_START_TOKEN)
2012 		sk = udp_get_idx(seq, 0);
2013 	else
2014 		sk = udp_get_next(seq, v);
2015 
2016 	++*pos;
2017 	return sk;
2018 }
2019 
2020 static void udp_seq_stop(struct seq_file *seq, void *v)
2021 {
2022 	struct udp_iter_state *state = seq->private;
2023 
2024 	if (state->bucket <= state->udp_table->mask)
2025 		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
2026 }
2027 
2028 static int udp_seq_open(struct inode *inode, struct file *file)
2029 {
2030 	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
2031 	struct udp_iter_state *s;
2032 	int err;
2033 
2034 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2035 			   sizeof(struct udp_iter_state));
2036 	if (err < 0)
2037 		return err;
2038 
2039 	s = ((struct seq_file *)file->private_data)->private;
2040 	s->family		= afinfo->family;
2041 	s->udp_table		= afinfo->udp_table;
2042 	return err;
2043 }
2044 
2045 /* ------------------------------------------------------------------------ */
2046 int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
2047 {
2048 	struct proc_dir_entry *p;
2049 	int rc = 0;
2050 
2051 	afinfo->seq_fops.open		= udp_seq_open;
2052 	afinfo->seq_fops.read		= seq_read;
2053 	afinfo->seq_fops.llseek		= seq_lseek;
2054 	afinfo->seq_fops.release	= seq_release_net;
2055 
2056 	afinfo->seq_ops.start		= udp_seq_start;
2057 	afinfo->seq_ops.next		= udp_seq_next;
2058 	afinfo->seq_ops.stop		= udp_seq_stop;
2059 
2060 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2061 			     &afinfo->seq_fops, afinfo);
2062 	if (!p)
2063 		rc = -ENOMEM;
2064 	return rc;
2065 }
2066 EXPORT_SYMBOL(udp_proc_register);
2067 
2068 void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
2069 {
2070 	proc_net_remove(net, afinfo->name);
2071 }
2072 EXPORT_SYMBOL(udp_proc_unregister);
2073 
2074 /* ------------------------------------------------------------------------ */
2075 static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2076 		int bucket, int *len)
2077 {
2078 	struct inet_sock *inet = inet_sk(sp);
2079 	__be32 dest = inet->inet_daddr;
2080 	__be32 src  = inet->inet_rcv_saddr;
2081 	__u16 destp	  = ntohs(inet->inet_dport);
2082 	__u16 srcp	  = ntohs(inet->inet_sport);
2083 
2084 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
2085 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
2086 		bucket, src, srcp, dest, destp, sp->sk_state,
2087 		sk_wmem_alloc_get(sp),
2088 		sk_rmem_alloc_get(sp),
2089 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
2090 		atomic_read(&sp->sk_refcnt), sp,
2091 		atomic_read(&sp->sk_drops), len);
2092 }
2093 
2094 int udp4_seq_show(struct seq_file *seq, void *v)
2095 {
2096 	if (v == SEQ_START_TOKEN)
2097 		seq_printf(seq, "%-127s\n",
2098 			   "  sl  local_address rem_address   st tx_queue "
2099 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2100 			   "inode ref pointer drops");
2101 	else {
2102 		struct udp_iter_state *state = seq->private;
2103 		int len;
2104 
2105 		udp4_format_sock(v, seq, state->bucket, &len);
2106 		seq_printf(seq, "%*s\n", 127 - len, "");
2107 	}
2108 	return 0;
2109 }
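/*
 * Example: decoding one /proc/net/udp line emitted by
 * udp4_format_sock() above (hedged user-space sketch; the field order
 * follows the format string, addresses and ports are hex in network
 * byte order, and line is assumed to hold one data row):
 *
 *	#include <stdio.h>
 *
 *	unsigned bucket, saddr, sport, daddr, dport, state, tx, rx, uid;
 *	unsigned long inode;
 *
 *	// e.g. "  68: 0100007F:0035 00000000:0000 07 00000000:00000000 ..."
 *	sscanf(line, "%u: %X:%X %X:%X %X %X:%X %*X:%*lX %*X %u %*d %lu",
 *	       &bucket, &saddr, &sport, &daddr, &dport, &state,
 *	       &tx, &rx, &uid, &inode);
 */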
2110 
2111 /* ------------------------------------------------------------------------ */
2112 static struct udp_seq_afinfo udp4_seq_afinfo = {
2113 	.name		= "udp",
2114 	.family		= AF_INET,
2115 	.udp_table	= &udp_table,
2116 	.seq_fops	= {
2117 		.owner	=	THIS_MODULE,
2118 	},
2119 	.seq_ops	= {
2120 		.show		= udp4_seq_show,
2121 	},
2122 };
2123 
2124 static int __net_init udp4_proc_init_net(struct net *net)
2125 {
2126 	return udp_proc_register(net, &udp4_seq_afinfo);
2127 }
2128 
2129 static void __net_exit udp4_proc_exit_net(struct net *net)
2130 {
2131 	udp_proc_unregister(net, &udp4_seq_afinfo);
2132 }
2133 
2134 static struct pernet_operations udp4_net_ops = {
2135 	.init = udp4_proc_init_net,
2136 	.exit = udp4_proc_exit_net,
2137 };
2138 
2139 int __init udp4_proc_init(void)
2140 {
2141 	return register_pernet_subsys(&udp4_net_ops);
2142 }
2143 
2144 void udp4_proc_exit(void)
2145 {
2146 	unregister_pernet_subsys(&udp4_net_ops);
2147 }
2148 #endif /* CONFIG_PROC_FS */
2149 
2150 static __initdata unsigned long uhash_entries;
2151 static int __init set_uhash_entries(char *str)
2152 {
2153 	if (!str)
2154 		return 0;
2155 	uhash_entries = simple_strtoul(str, &str, 0);
2156 	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
2157 		uhash_entries = UDP_HTABLE_SIZE_MIN;
2158 	return 1;
2159 }
2160 __setup("uhash_entries=", set_uhash_entries);
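/*
 * Example (hedged note): the table size can be requested at boot by
 * appending
 *
 *	uhash_entries=4096
 *
 * to the kernel command line. Values below UDP_HTABLE_SIZE_MIN are
 * rounded up by the check above; 0, or omitting the parameter, keeps
 * the automatic sizing done in udp_table_init().
 */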
2161 
2162 void __init udp_table_init(struct udp_table *table, const char *name)
2163 {
2164 	unsigned int i;
2165 
2166 	if (!CONFIG_BASE_SMALL)
2167 		table->hash = alloc_large_system_hash(name,
2168 			2 * sizeof(struct udp_hslot),
2169 			uhash_entries,
2170 			21, /* one slot per 2 MB */
2171 			0,
2172 			&table->log,
2173 			&table->mask,
2174 			64 * 1024);
2175 	/*
2176 	 * Make sure hash table has the minimum size
2177 	 */
2178 	if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
2179 		table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
2180 				      2 * sizeof(struct udp_hslot), GFP_KERNEL);
2181 		if (!table->hash)
2182 			panic("%s: hash table allocation failed", name);
2183 		table->log = ilog2(UDP_HTABLE_SIZE_MIN);
2184 		table->mask = UDP_HTABLE_SIZE_MIN - 1;
2185 	}
2186 	table->hash2 = table->hash + (table->mask + 1);
2187 	for (i = 0; i <= table->mask; i++) {
2188 		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
2189 		table->hash[i].count = 0;
2190 		spin_lock_init(&table->hash[i].lock);
2191 	}
2192 	for (i = 0; i <= table->mask; i++) {
2193 		INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
2194 		table->hash2[i].count = 0;
2195 		spin_lock_init(&table->hash2[i].lock);
2196 	}
2197 }
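/*
 * Example: how a socket lands in a slot (a hedged sketch using the
 * udp_hashfn() helper from include/net/udp.h). A socket bound to port
 * 53 is chained as:
 *
 *	unsigned int slot = udp_hashfn(net, 53, udp_table.mask);
 *	struct udp_hslot *hslot = &udp_table.hash[slot];
 *
 * hash[] chains sockets by local port alone; hash2[], initialised
 * above, chains them by (local address, port) to speed up lookups for
 * connected sockets.
 */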
2198 
2199 void __init udp_init(void)
2200 {
2201 	unsigned long nr_pages, limit;
2202 
2203 	udp_table_init(&udp_table, "UDP");
2204 	/* Set the pressure threshold up by the same strategy of TCP. It is a
2205 	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
2206 	 * toward zero with the amount of memory, with a floor of 128 pages.
2207 	 */
2208 	nr_pages = totalram_pages - totalhigh_pages;
2209 	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2210 	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2211 	limit = max(limit, 128UL);
2212 	sysctl_udp_mem[0] = limit / 4 * 3;
2213 	sysctl_udp_mem[1] = limit;
2214 	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
2215 
2216 	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
2217 	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
2218 }
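/*
 * Worked example of the sizing above (hedged, assuming 4 KB pages, so
 * PAGE_SHIFT = 12): on a machine with 1 GB of low memory,
 * nr_pages = 262144, and
 *
 *	limit = min(262144, 1 << 16) >> 8	= 256
 *	limit = (256 * (262144 >> 8)) >> 1	= 131072 pages (512 MB)
 *
 * giving udp_mem = { 98304, 131072, 196608 } pages: pressure starts
 * at 384 MB and the hard limit is 768 MB of UDP buffer memory.
 */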
2219 
2220 int udp4_ufo_send_check(struct sk_buff *skb)
2221 {
2222 	const struct iphdr *iph;
2223 	struct udphdr *uh;
2224 
2225 	if (!pskb_may_pull(skb, sizeof(*uh)))
2226 		return -EINVAL;
2227 
2228 	iph = ip_hdr(skb);
2229 	uh = udp_hdr(skb);
2230 
2231 	uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
2232 				       IPPROTO_UDP, 0);
2233 	skb->csum_start = skb_transport_header(skb) - skb->head;
2234 	skb->csum_offset = offsetof(struct udphdr, check);
2235 	skb->ip_summed = CHECKSUM_PARTIAL;
2236 	return 0;
2237 }
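/*
 * A note on the setup above (hedged): seeding uh->check with the
 * complemented pseudo-header sum means whoever completes the checksum
 * only has to fold in the bytes from csum_start onward, i.e.
 *
 *	check = fold(pseudo_hdr_sum + sum(UDP header + payload))
 *
 * which is exactly the contract CHECKSUM_PARTIAL expresses through
 * csum_start and csum_offset.
 */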
2238 
2239 struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
2240 {
2241 	struct sk_buff *segs = ERR_PTR(-EINVAL);
2242 	unsigned int mss;
2243 	int offset;
2244 	__wsum csum;
2245 
2246 	mss = skb_shinfo(skb)->gso_size;
2247 	if (unlikely(skb->len <= mss))
2248 		goto out;
2249 
2250 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2251 		/* Packet is from an untrusted source, reset gso_segs. */
2252 		int type = skb_shinfo(skb)->gso_type;
2253 
2254 		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
2255 			     !(type & (SKB_GSO_UDP))))
2256 			goto out;
2257 
2258 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2259 
2260 		segs = NULL;
2261 		goto out;
2262 	}
2263 
2264 	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
2265 	 * do checksum of UDP packets sent as multiple IP fragments.
2266 	 */
2267 	offset = skb_checksum_start_offset(skb);
2268 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
2269 	offset += skb->csum_offset;
2270 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
2271 	skb->ip_summed = CHECKSUM_NONE;
2272 
2273 	/* Fragment the skb. IP headers of the fragments are updated in
2274 	 * inet_gso_segment()
2275 	 */
2276 	segs = skb_segment(skb, features);
2277 out:
2278 	return segs;
2279 }
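/*
 * Worked example for the untrusted (SKB_GSO_DODGY) path above
 * (hedged): a 4028-byte skb with gso_size = 1472 is re-stamped with
 *
 *	gso_segs = DIV_ROUND_UP(4028, 1472) = 3
 *
 * and returned unsegmented (segs == NULL). The trusted path instead
 * completes the UDP checksum in software before skb_segment(), since
 * the datagram leaves as multiple IP fragments that hardware cannot
 * checksum.
 */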
2280 
2281