1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The IP to API glue.
8 *
9 * Authors: see ip.c
10 *
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
18
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/mm.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/icmp.h>
25 #include <linux/inetdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/slab.h>
28 #include <net/sock.h>
29 #include <net/ip.h>
30 #include <net/icmp.h>
31 #include <net/tcp_states.h>
32 #include <linux/udp.h>
33 #include <linux/igmp.h>
34 #include <linux/netfilter.h>
35 #include <linux/route.h>
36 #include <linux/mroute.h>
37 #include <net/inet_ecn.h>
38 #include <net/route.h>
39 #include <net/xfrm.h>
40 #include <net/compat.h>
41 #include <net/checksum.h>
42 #if IS_ENABLED(CONFIG_IPV6)
43 #include <net/transp_v6.h>
44 #endif
45 #include <net/ip_fib.h>
46
47 #include <linux/errqueue.h>
48 #include <linux/uaccess.h>
49
50 #include <linux/bpfilter.h>
51
52 /*
53 * SOL_IP control messages.
54 */
55
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
57 {
58 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
59
60 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
61
62 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
63 }
64
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
66 {
67 int ttl = ip_hdr(skb)->ttl;
68 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
69 }
70
static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
72 {
73 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
74 }
75
static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
77 {
78 if (IPCB(skb)->opt.optlen == 0)
79 return;
80
81 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
82 ip_hdr(skb) + 1);
83 }
84
85
static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
87 struct sk_buff *skb)
88 {
89 unsigned char optbuf[sizeof(struct ip_options) + 40];
90 struct ip_options *opt = (struct ip_options *)optbuf;
91
92 if (IPCB(skb)->opt.optlen == 0)
93 return;
94
95 if (ip_options_echo(net, opt, skb)) {
96 msg->msg_flags |= MSG_CTRUNC;
97 return;
98 }
99 ip_options_undo(opt);
100
101 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
102 }
103
static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
105 {
106 int val;
107
108 if (IPCB(skb)->frag_max_size == 0)
109 return;
110
111 val = IPCB(skb)->frag_max_size;
112 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
113 }
114
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
116 int tlen, int offset)
117 {
118 __wsum csum = skb->csum;
119
120 if (skb->ip_summed != CHECKSUM_COMPLETE)
121 return;
122
123 if (offset != 0) {
124 int tend_off = skb_transport_offset(skb) + tlen;
125 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
126 }
127
128 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
129 }
130
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
132 {
133 char *secdata;
134 u32 seclen, secid;
135 int err;
136
137 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
138 if (err)
139 return;
140
141 err = security_secid_to_secctx(secid, &secdata, &seclen);
142 if (err)
143 return;
144
145 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
146 security_release_secctx(secdata, seclen);
147 }
148
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150 {
151 __be16 _ports[2], *ports;
152 struct sockaddr_in sin;
153
154 /* All current transport protocols have the port numbers in the
155 * first four bytes of the transport header and this function is
156 * written with this assumption in mind.
157 */
158 ports = skb_header_pointer(skb, skb_transport_offset(skb),
159 sizeof(_ports), &_ports);
160 if (!ports)
161 return;
162
163 sin.sin_family = AF_INET;
164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
165 sin.sin_port = ports[1];
166 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
167
168 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
169 }
170
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
172 struct sk_buff *skb, int tlen, int offset)
173 {
174 unsigned long flags = inet_cmsg_flags(inet_sk(sk));
175
176 if (!flags)
177 return;
178
179 /* Ordered by supposed usage frequency */
180 if (flags & IP_CMSG_PKTINFO) {
181 ip_cmsg_recv_pktinfo(msg, skb);
182
183 flags &= ~IP_CMSG_PKTINFO;
184 if (!flags)
185 return;
186 }
187
188 if (flags & IP_CMSG_TTL) {
189 ip_cmsg_recv_ttl(msg, skb);
190
191 flags &= ~IP_CMSG_TTL;
192 if (!flags)
193 return;
194 }
195
196 if (flags & IP_CMSG_TOS) {
197 ip_cmsg_recv_tos(msg, skb);
198
199 flags &= ~IP_CMSG_TOS;
200 if (!flags)
201 return;
202 }
203
204 if (flags & IP_CMSG_RECVOPTS) {
205 ip_cmsg_recv_opts(msg, skb);
206
207 flags &= ~IP_CMSG_RECVOPTS;
208 if (!flags)
209 return;
210 }
211
212 if (flags & IP_CMSG_RETOPTS) {
213 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
214
215 flags &= ~IP_CMSG_RETOPTS;
216 if (!flags)
217 return;
218 }
219
220 if (flags & IP_CMSG_PASSSEC) {
221 ip_cmsg_recv_security(msg, skb);
222
223 flags &= ~IP_CMSG_PASSSEC;
224 if (!flags)
225 return;
226 }
227
228 if (flags & IP_CMSG_ORIGDSTADDR) {
229 ip_cmsg_recv_dstaddr(msg, skb);
230
231 flags &= ~IP_CMSG_ORIGDSTADDR;
232 if (!flags)
233 return;
234 }
235
236 if (flags & IP_CMSG_CHECKSUM)
237 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
238
239 if (flags & IP_CMSG_RECVFRAGSIZE)
240 ip_cmsg_recv_fragsize(msg, skb);
241 }
242 EXPORT_SYMBOL(ip_cmsg_recv_offset);
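/*
 * Example (userspace, illustrative sketch only, not part of the kernel
 * build): a receiver that enables IP_PKTINFO and walks the SOL_IP control
 * messages that ip_cmsg_recv_offset() above appends to recvmsg().
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *
 *	int recv_with_pktinfo(int fd)
 *	{
 *		char data[2048], cbuf[512];
 *		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *		int one = 1;
 *
 *		setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return -1;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == IPPROTO_IP &&
 *			    cmsg->cmsg_type == IP_PKTINFO) {
 *				struct in_pktinfo info;
 *
 *				memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
 *				printf("ifindex %d dst %s\n", info.ipi_ifindex,
 *				       inet_ntoa(info.ipi_addr));
 *			}
 *		}
 *		return 0;
 *	}
 */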
243
int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
245 bool allow_ipv6)
246 {
247 int err, val;
248 struct cmsghdr *cmsg;
249 struct net *net = sock_net(sk);
250
251 for_each_cmsghdr(cmsg, msg) {
252 if (!CMSG_OK(msg, cmsg))
253 return -EINVAL;
254 #if IS_ENABLED(CONFIG_IPV6)
255 if (allow_ipv6 &&
256 cmsg->cmsg_level == SOL_IPV6 &&
257 cmsg->cmsg_type == IPV6_PKTINFO) {
258 struct in6_pktinfo *src_info;
259
260 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
261 return -EINVAL;
262 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
263 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
264 return -EINVAL;
265 if (src_info->ipi6_ifindex)
266 ipc->oif = src_info->ipi6_ifindex;
267 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
268 continue;
269 }
270 #endif
271 if (cmsg->cmsg_level == SOL_SOCKET) {
272 err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
273 if (err)
274 return err;
275 continue;
276 }
277
278 if (cmsg->cmsg_level != SOL_IP)
279 continue;
280 switch (cmsg->cmsg_type) {
281 case IP_RETOPTS:
282 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
283
284 /* Our caller is responsible for freeing ipc->opt */
285 err = ip_options_get(net, &ipc->opt,
286 KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
287 err < 40 ? err : 40);
288 if (err)
289 return err;
290 break;
291 case IP_PKTINFO:
292 {
293 struct in_pktinfo *info;
294 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
295 return -EINVAL;
296 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
297 if (info->ipi_ifindex)
298 ipc->oif = info->ipi_ifindex;
299 ipc->addr = info->ipi_spec_dst.s_addr;
300 break;
301 }
302 case IP_TTL:
303 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
304 return -EINVAL;
305 val = *(int *)CMSG_DATA(cmsg);
306 if (val < 1 || val > 255)
307 return -EINVAL;
308 ipc->ttl = val;
309 break;
310 case IP_TOS:
311 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
312 val = *(int *)CMSG_DATA(cmsg);
313 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
314 val = *(u8 *)CMSG_DATA(cmsg);
315 else
316 return -EINVAL;
317 if (val < 0 || val > 255)
318 return -EINVAL;
319 ipc->tos = val;
320 ipc->priority = rt_tos2priority(ipc->tos);
321 break;
322 case IP_PROTOCOL:
323 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
324 return -EINVAL;
325 val = *(int *)CMSG_DATA(cmsg);
326 if (val < 1 || val > 255)
327 return -EINVAL;
328 ipc->protocol = val;
329 break;
330 default:
331 return -EINVAL;
332 }
333 }
334 return 0;
335 }
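/*
 * Example (userspace, illustrative sketch only): the sending side of the
 * ancillary data parsed by ip_cmsg_send() above, here attaching a
 * per-packet IP_TTL (valid range 1..255) to a sendmsg() call.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	ssize_t send_with_ttl(int fd, const void *buf, size_t len,
 *			      const struct sockaddr_in *dst, int ttl)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(int))];
 *		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 *		struct msghdr msg = {
 *			.msg_name = (void *)dst, .msg_namelen = sizeof(*dst),
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = IPPROTO_IP;
 *		cmsg->cmsg_type = IP_TTL;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &ttl, sizeof(int));
 *		return sendmsg(fd, &msg, 0);
 *	}
 */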
336
static void ip_ra_destroy_rcu(struct rcu_head *head)
338 {
339 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
340
341 sock_put(ra->saved_sk);
342 kfree(ra);
343 }
344
int ip_ra_control(struct sock *sk, unsigned char on,
346 void (*destructor)(struct sock *))
347 {
348 struct ip_ra_chain *ra, *new_ra;
349 struct ip_ra_chain __rcu **rap;
350 struct net *net = sock_net(sk);
351
352 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
353 return -EINVAL;
354
355 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
356 if (on && !new_ra)
357 return -ENOMEM;
358
359 mutex_lock(&net->ipv4.ra_mutex);
360 for (rap = &net->ipv4.ra_chain;
361 (ra = rcu_dereference_protected(*rap,
362 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
363 rap = &ra->next) {
364 if (ra->sk == sk) {
365 if (on) {
366 mutex_unlock(&net->ipv4.ra_mutex);
367 kfree(new_ra);
368 return -EADDRINUSE;
369 }
/* don't let ip_call_ra_chain() use sk again */
371 ra->sk = NULL;
372 RCU_INIT_POINTER(*rap, ra->next);
373 mutex_unlock(&net->ipv4.ra_mutex);
374
375 if (ra->destructor)
376 ra->destructor(sk);
/*
 * Delay sock_put(sk) and kfree(ra) until after one RCU grace
 * period. This guarantees that ip_call_ra_chain() doesn't need
 * to mess with socket refcounts.
 */
382 ra->saved_sk = sk;
383 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
384 return 0;
385 }
386 }
387 if (!new_ra) {
388 mutex_unlock(&net->ipv4.ra_mutex);
389 return -ENOBUFS;
390 }
391 new_ra->sk = sk;
392 new_ra->destructor = destructor;
393
394 RCU_INIT_POINTER(new_ra->next, ra);
395 rcu_assign_pointer(*rap, new_ra);
396 sock_hold(sk);
397 mutex_unlock(&net->ipv4.ra_mutex);
398
399 return 0;
400 }
401
static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
403 struct sock_ee_data_rfc4884 *out)
404 {
405 switch (icmp_hdr(skb)->type) {
406 case ICMP_DEST_UNREACH:
407 case ICMP_TIME_EXCEEDED:
408 case ICMP_PARAMETERPROB:
409 ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
410 icmp_hdr(skb)->un.reserved[1] * 4);
411 }
412 }
413
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
415 __be16 port, u32 info, u8 *payload)
416 {
417 struct sock_exterr_skb *serr;
418
419 skb = skb_clone(skb, GFP_ATOMIC);
420 if (!skb)
421 return;
422
423 serr = SKB_EXT_ERR(skb);
424 serr->ee.ee_errno = err;
425 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
426 serr->ee.ee_type = icmp_hdr(skb)->type;
427 serr->ee.ee_code = icmp_hdr(skb)->code;
428 serr->ee.ee_pad = 0;
429 serr->ee.ee_info = info;
430 serr->ee.ee_data = 0;
431 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
432 skb_network_header(skb);
433 serr->port = port;
434
435 if (skb_pull(skb, payload - skb->data)) {
436 if (inet_test_bit(RECVERR_RFC4884, sk))
437 ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
438
439 skb_reset_transport_header(skb);
440 if (sock_queue_err_skb(sk, skb) == 0)
441 return;
442 }
443 kfree_skb(skb);
444 }
445 EXPORT_SYMBOL_GPL(ip_icmp_error);
446
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
448 {
449 struct sock_exterr_skb *serr;
450 struct iphdr *iph;
451 struct sk_buff *skb;
452
453 if (!inet_test_bit(RECVERR, sk))
454 return;
455
456 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
457 if (!skb)
458 return;
459
460 skb_put(skb, sizeof(struct iphdr));
461 skb_reset_network_header(skb);
462 iph = ip_hdr(skb);
463 iph->daddr = daddr;
464
465 serr = SKB_EXT_ERR(skb);
466 serr->ee.ee_errno = err;
467 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
468 serr->ee.ee_type = 0;
469 serr->ee.ee_code = 0;
470 serr->ee.ee_pad = 0;
471 serr->ee.ee_info = info;
472 serr->ee.ee_data = 0;
473 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
474 serr->port = port;
475
476 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
477 skb_reset_transport_header(skb);
478
479 if (sock_queue_err_skb(sk, skb))
480 kfree_skb(skb);
481 }
482
483 /* For some errors we have valid addr_offset even with zero payload and
484 * zero port. Also, addr_offset should be supported if port is set.
485 */
static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
487 {
488 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
489 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
490 }
491
/* IPv4 supports cmsg on all ICMP errors and some timestamps
493 *
494 * Timestamp code paths do not initialize the fields expected by cmsg:
495 * the PKTINFO fields in skb->cb[]. Fill those in here.
496 */
static bool ipv4_datagram_support_cmsg(const struct sock *sk,
498 struct sk_buff *skb,
499 int ee_origin)
500 {
501 struct in_pktinfo *info;
502
503 if (ee_origin == SO_EE_ORIGIN_ICMP)
504 return true;
505
506 if (ee_origin == SO_EE_ORIGIN_LOCAL)
507 return false;
508
509 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
510 * timestamp with egress dev. Not possible for packets without iif
511 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
512 */
513 info = PKTINFO_SKB_CB(skb);
514 if (!(READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_CMSG) ||
515 !info->ipi_ifindex)
516 return false;
517
518 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
519 return true;
520 }
521
522 /*
523 * Handle MSG_ERRQUEUE
524 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
526 {
527 struct sock_exterr_skb *serr;
528 struct sk_buff *skb;
529 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
530 struct {
531 struct sock_extended_err ee;
532 struct sockaddr_in offender;
533 } errhdr;
534 int err;
535 int copied;
536
537 err = -EAGAIN;
538 skb = sock_dequeue_err_skb(sk);
539 if (!skb)
540 goto out;
541
542 copied = skb->len;
543 if (copied > len) {
544 msg->msg_flags |= MSG_TRUNC;
545 copied = len;
546 }
547 err = skb_copy_datagram_msg(skb, 0, msg, copied);
548 if (unlikely(err)) {
549 kfree_skb(skb);
550 return err;
551 }
552 sock_recv_timestamp(msg, sk, skb);
553
554 serr = SKB_EXT_ERR(skb);
555
556 if (sin && ipv4_datagram_support_addr(serr)) {
557 sin->sin_family = AF_INET;
558 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
559 serr->addr_offset);
560 sin->sin_port = serr->port;
561 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
562 *addr_len = sizeof(*sin);
563 }
564
565 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
566 sin = &errhdr.offender;
567 memset(sin, 0, sizeof(*sin));
568
569 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
570 sin->sin_family = AF_INET;
571 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
572 if (inet_cmsg_flags(inet_sk(sk)))
573 ip_cmsg_recv(msg, skb);
574 }
575
576 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
577
578 /* Now we could try to dump offended packet options */
579
580 msg->msg_flags |= MSG_ERRQUEUE;
581 err = copied;
582
583 consume_skb(skb);
584 out:
585 return err;
586 }
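/*
 * Example (userspace, illustrative sketch only): draining the error queue
 * serviced by ip_recv_error() above. With IP_RECVERR enabled, a recvmsg()
 * with MSG_ERRQUEUE returns a queued error (or fails with EAGAIN if none
 * is pending, as seen above) plus an IP_RECVERR control message carrying
 * the sock_extended_err.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/errqueue.h>
 *
 *	int drain_errqueue(int fd)
 *	{
 *		char data[512], cbuf[512];
 *		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *		struct sockaddr_in addr;
 *		struct msghdr msg = {
 *			.msg_name = &addr, .msg_namelen = sizeof(addr),
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *		int one = 1;
 *
 *		setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));
 *		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *			return errno == EAGAIN ? 0 : -1;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == IPPROTO_IP &&
 *			    cmsg->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err ee;
 *
 *				memcpy(&ee, CMSG_DATA(cmsg), sizeof(ee));
 *				printf("errno %u origin %u type %u code %u\n",
 *				       ee.ee_errno, ee.ee_origin,
 *				       ee.ee_type, ee.ee_code);
 *			}
 *		}
 *		return 0;
 *	}
 */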
587
void __ip_sock_set_tos(struct sock *sk, int val)
589 {
590 if (sk->sk_type == SOCK_STREAM) {
591 val &= ~INET_ECN_MASK;
592 val |= inet_sk(sk)->tos & INET_ECN_MASK;
593 }
594 if (inet_sk(sk)->tos != val) {
595 inet_sk(sk)->tos = val;
596 WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
597 sk_dst_reset(sk);
598 }
599 }
600
void ip_sock_set_tos(struct sock *sk, int val)
602 {
603 lock_sock(sk);
604 __ip_sock_set_tos(sk, val);
605 release_sock(sk);
606 }
607 EXPORT_SYMBOL(ip_sock_set_tos);
608
void ip_sock_set_freebind(struct sock *sk)
610 {
611 inet_set_bit(FREEBIND, sk);
612 }
613 EXPORT_SYMBOL(ip_sock_set_freebind);
614
void ip_sock_set_recverr(struct sock *sk)
616 {
617 inet_set_bit(RECVERR, sk);
618 }
619 EXPORT_SYMBOL(ip_sock_set_recverr);
620
int ip_sock_set_mtu_discover(struct sock *sk, int val)
622 {
623 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
624 return -EINVAL;
625 lock_sock(sk);
626 inet_sk(sk)->pmtudisc = val;
627 release_sock(sk);
628 return 0;
629 }
630 EXPORT_SYMBOL(ip_sock_set_mtu_discover);
631
void ip_sock_set_pktinfo(struct sock *sk)
633 {
634 inet_set_bit(PKTINFO, sk);
635 }
636 EXPORT_SYMBOL(ip_sock_set_pktinfo);
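/*
 * Example (in-kernel, illustrative sketch only): the exported
 * ip_sock_set_*() helpers above exist for kernel users that own a socket
 * and want to adjust SOL_IP state without going through setsockopt().
 * The caller below is hypothetical.
 *
 *	#include <linux/net.h>
 *	#include <linux/in.h>
 *	#include <linux/ip.h>
 *	#include <net/ip.h>
 *	#include <net/sock.h>
 *
 *	static int example_setup_sock(struct net *net, struct socket **res)
 *	{
 *		int err;
 *
 *		err = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, res);
 *		if (err)
 *			return err;
 *
 *		ip_sock_set_recverr((*res)->sk);
 *		ip_sock_set_tos((*res)->sk, IPTOS_LOWDELAY);
 *		err = ip_sock_set_mtu_discover((*res)->sk, IP_PMTUDISC_DO);
 *		if (err)
 *			sock_release(*res);
 *		return err;
 *	}
 */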
637
638 /*
639 * Socket option code for IP. This is the end of the line after any
640 * TCP,UDP etc options on an IP socket.
641 */
static bool setsockopt_needs_rtnl(int optname)
643 {
644 switch (optname) {
645 case IP_ADD_MEMBERSHIP:
646 case IP_ADD_SOURCE_MEMBERSHIP:
647 case IP_BLOCK_SOURCE:
648 case IP_DROP_MEMBERSHIP:
649 case IP_DROP_SOURCE_MEMBERSHIP:
650 case IP_MSFILTER:
651 case IP_UNBLOCK_SOURCE:
652 case MCAST_BLOCK_SOURCE:
653 case MCAST_MSFILTER:
654 case MCAST_JOIN_GROUP:
655 case MCAST_JOIN_SOURCE_GROUP:
656 case MCAST_LEAVE_GROUP:
657 case MCAST_LEAVE_SOURCE_GROUP:
658 case MCAST_UNBLOCK_SOURCE:
659 return true;
660 }
661 return false;
662 }
663
static int set_mcast_msfilter(struct sock *sk, int ifindex,
665 int numsrc, int fmode,
666 struct sockaddr_storage *group,
667 struct sockaddr_storage *list)
668 {
669 struct ip_msfilter *msf;
670 struct sockaddr_in *psin;
671 int err, i;
672
673 msf = kmalloc(IP_MSFILTER_SIZE(numsrc), GFP_KERNEL);
674 if (!msf)
675 return -ENOBUFS;
676
677 psin = (struct sockaddr_in *)group;
678 if (psin->sin_family != AF_INET)
679 goto Eaddrnotavail;
680 msf->imsf_multiaddr = psin->sin_addr.s_addr;
681 msf->imsf_interface = 0;
682 msf->imsf_fmode = fmode;
683 msf->imsf_numsrc = numsrc;
684 for (i = 0; i < numsrc; ++i) {
685 psin = (struct sockaddr_in *)&list[i];
686
687 if (psin->sin_family != AF_INET)
688 goto Eaddrnotavail;
689 msf->imsf_slist_flex[i] = psin->sin_addr.s_addr;
690 }
691 err = ip_mc_msfilter(sk, msf, ifindex);
692 kfree(msf);
693 return err;
694
695 Eaddrnotavail:
696 kfree(msf);
697 return -EADDRNOTAVAIL;
698 }
699
static int copy_group_source_from_sockptr(struct group_source_req *greqs,
701 sockptr_t optval, int optlen)
702 {
703 if (in_compat_syscall()) {
704 struct compat_group_source_req gr32;
705
706 if (optlen != sizeof(gr32))
707 return -EINVAL;
708 if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
709 return -EFAULT;
710 greqs->gsr_interface = gr32.gsr_interface;
711 greqs->gsr_group = gr32.gsr_group;
712 greqs->gsr_source = gr32.gsr_source;
713 } else {
714 if (optlen != sizeof(*greqs))
715 return -EINVAL;
716 if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
717 return -EFAULT;
718 }
719
720 return 0;
721 }
722
static int do_mcast_group_source(struct sock *sk, int optname,
724 sockptr_t optval, int optlen)
725 {
726 struct group_source_req greqs;
727 struct ip_mreq_source mreqs;
728 struct sockaddr_in *psin;
729 int omode, add, err;
730
731 err = copy_group_source_from_sockptr(&greqs, optval, optlen);
732 if (err)
733 return err;
734
735 if (greqs.gsr_group.ss_family != AF_INET ||
736 greqs.gsr_source.ss_family != AF_INET)
737 return -EADDRNOTAVAIL;
738
739 psin = (struct sockaddr_in *)&greqs.gsr_group;
740 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
741 psin = (struct sockaddr_in *)&greqs.gsr_source;
742 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
743 mreqs.imr_interface = 0; /* use index for mc_source */
744
745 if (optname == MCAST_BLOCK_SOURCE) {
746 omode = MCAST_EXCLUDE;
747 add = 1;
748 } else if (optname == MCAST_UNBLOCK_SOURCE) {
749 omode = MCAST_EXCLUDE;
750 add = 0;
751 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
752 struct ip_mreqn mreq;
753
754 psin = (struct sockaddr_in *)&greqs.gsr_group;
755 mreq.imr_multiaddr = psin->sin_addr;
756 mreq.imr_address.s_addr = 0;
757 mreq.imr_ifindex = greqs.gsr_interface;
758 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
759 if (err && err != -EADDRINUSE)
760 return err;
761 greqs.gsr_interface = mreq.imr_ifindex;
762 omode = MCAST_INCLUDE;
763 add = 1;
764 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
765 omode = MCAST_INCLUDE;
766 add = 0;
767 }
768 return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
769 }
770
static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
772 {
773 struct group_filter *gsf = NULL;
774 int err;
775
776 if (optlen < GROUP_FILTER_SIZE(0))
777 return -EINVAL;
778 if (optlen > READ_ONCE(sysctl_optmem_max))
779 return -ENOBUFS;
780
781 gsf = memdup_sockptr(optval, optlen);
782 if (IS_ERR(gsf))
783 return PTR_ERR(gsf);
784
785 /* numsrc >= (4G-140)/128 overflow in 32 bits */
786 err = -ENOBUFS;
787 if (gsf->gf_numsrc >= 0x1ffffff ||
788 gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
789 goto out_free_gsf;
790
791 err = -EINVAL;
792 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
793 goto out_free_gsf;
794
795 err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
796 gsf->gf_fmode, &gsf->gf_group,
797 gsf->gf_slist_flex);
798 out_free_gsf:
799 kfree(gsf);
800 return err;
801 }
802
static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
804 int optlen)
805 {
806 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
807 struct compat_group_filter *gf32;
808 unsigned int n;
809 void *p;
810 int err;
811
812 if (optlen < size0)
813 return -EINVAL;
814 if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
815 return -ENOBUFS;
816
817 p = kmalloc(optlen + 4, GFP_KERNEL);
818 if (!p)
819 return -ENOMEM;
820 gf32 = p + 4; /* we want ->gf_group and ->gf_slist_flex aligned */
821
822 err = -EFAULT;
823 if (copy_from_sockptr(gf32, optval, optlen))
824 goto out_free_gsf;
825
826 /* numsrc >= (4G-140)/128 overflow in 32 bits */
827 n = gf32->gf_numsrc;
828 err = -ENOBUFS;
829 if (n >= 0x1ffffff)
830 goto out_free_gsf;
831
832 err = -EINVAL;
833 if (offsetof(struct compat_group_filter, gf_slist_flex[n]) > optlen)
834 goto out_free_gsf;
835
836 /* numsrc >= (4G-140)/128 overflow in 32 bits */
837 err = -ENOBUFS;
838 if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
839 goto out_free_gsf;
840 err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
841 &gf32->gf_group, gf32->gf_slist_flex);
842 out_free_gsf:
843 kfree(p);
844 return err;
845 }
846
static int ip_mcast_join_leave(struct sock *sk, int optname,
848 sockptr_t optval, int optlen)
849 {
850 struct ip_mreqn mreq = { };
851 struct sockaddr_in *psin;
852 struct group_req greq;
853
854 if (optlen < sizeof(struct group_req))
855 return -EINVAL;
856 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
857 return -EFAULT;
858
859 psin = (struct sockaddr_in *)&greq.gr_group;
860 if (psin->sin_family != AF_INET)
861 return -EINVAL;
862 mreq.imr_multiaddr = psin->sin_addr;
863 mreq.imr_ifindex = greq.gr_interface;
864 if (optname == MCAST_JOIN_GROUP)
865 return ip_mc_join_group(sk, &mreq);
866 return ip_mc_leave_group(sk, &mreq);
867 }
868
static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
870 sockptr_t optval, int optlen)
871 {
872 struct compat_group_req greq;
873 struct ip_mreqn mreq = { };
874 struct sockaddr_in *psin;
875
876 if (optlen < sizeof(struct compat_group_req))
877 return -EINVAL;
878 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
879 return -EFAULT;
880
881 psin = (struct sockaddr_in *)&greq.gr_group;
882 if (psin->sin_family != AF_INET)
883 return -EINVAL;
884 mreq.imr_multiaddr = psin->sin_addr;
885 mreq.imr_ifindex = greq.gr_interface;
886
887 if (optname == MCAST_JOIN_GROUP)
888 return ip_mc_join_group(sk, &mreq);
889 return ip_mc_leave_group(sk, &mreq);
890 }
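/*
 * Example (userspace, illustrative sketch only): joining a group with the
 * protocol-independent MCAST_JOIN_GROUP option handled by the
 * (compat_)ip_mcast_join_leave() helpers above.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *
 *	int join_group(int fd, const char *group, int ifindex)
 *	{
 *		struct group_req greq;
 *		struct sockaddr_in *sin = (struct sockaddr_in *)&greq.gr_group;
 *
 *		memset(&greq, 0, sizeof(greq));
 *		greq.gr_interface = ifindex;
 *		sin->sin_family = AF_INET;
 *		if (inet_pton(AF_INET, group, &sin->sin_addr) != 1)
 *			return -1;
 *		return setsockopt(fd, IPPROTO_IP, MCAST_JOIN_GROUP,
 *				  &greq, sizeof(greq));
 *	}
 */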
891
892 DEFINE_STATIC_KEY_FALSE(ip4_min_ttl);
893
int do_ip_setsockopt(struct sock *sk, int level, int optname,
895 sockptr_t optval, unsigned int optlen)
896 {
897 struct inet_sock *inet = inet_sk(sk);
898 struct net *net = sock_net(sk);
899 int val = 0, err;
900 bool needs_rtnl = setsockopt_needs_rtnl(optname);
901
902 switch (optname) {
903 case IP_PKTINFO:
904 case IP_RECVTTL:
905 case IP_RECVOPTS:
906 case IP_RECVTOS:
907 case IP_RETOPTS:
908 case IP_TOS:
909 case IP_TTL:
910 case IP_HDRINCL:
911 case IP_MTU_DISCOVER:
912 case IP_RECVERR:
913 case IP_ROUTER_ALERT:
914 case IP_FREEBIND:
915 case IP_PASSSEC:
916 case IP_TRANSPARENT:
917 case IP_MINTTL:
918 case IP_NODEFRAG:
919 case IP_BIND_ADDRESS_NO_PORT:
920 case IP_UNICAST_IF:
921 case IP_MULTICAST_TTL:
922 case IP_MULTICAST_ALL:
923 case IP_MULTICAST_LOOP:
924 case IP_RECVORIGDSTADDR:
925 case IP_CHECKSUM:
926 case IP_RECVFRAGSIZE:
927 case IP_RECVERR_RFC4884:
928 case IP_LOCAL_PORT_RANGE:
929 if (optlen >= sizeof(int)) {
930 if (copy_from_sockptr(&val, optval, sizeof(val)))
931 return -EFAULT;
932 } else if (optlen >= sizeof(char)) {
933 unsigned char ucval;
934
935 if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
936 return -EFAULT;
937 val = (int) ucval;
938 }
939 }
940
941 /* If optlen==0, it is equivalent to val == 0 */
942
943 if (optname == IP_ROUTER_ALERT)
944 return ip_ra_control(sk, val ? 1 : 0, NULL);
945 if (ip_mroute_opt(optname))
946 return ip_mroute_setsockopt(sk, optname, optval, optlen);
947
948 /* Handle options that can be set without locking the socket. */
949 switch (optname) {
950 case IP_PKTINFO:
951 inet_assign_bit(PKTINFO, sk, val);
952 return 0;
953 case IP_RECVTTL:
954 inet_assign_bit(TTL, sk, val);
955 return 0;
956 case IP_RECVTOS:
957 inet_assign_bit(TOS, sk, val);
958 return 0;
959 case IP_RECVOPTS:
960 inet_assign_bit(RECVOPTS, sk, val);
961 return 0;
962 case IP_RETOPTS:
963 inet_assign_bit(RETOPTS, sk, val);
964 return 0;
965 case IP_PASSSEC:
966 inet_assign_bit(PASSSEC, sk, val);
967 return 0;
968 case IP_RECVORIGDSTADDR:
969 inet_assign_bit(ORIGDSTADDR, sk, val);
970 return 0;
971 case IP_RECVFRAGSIZE:
972 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
973 return -EINVAL;
974 inet_assign_bit(RECVFRAGSIZE, sk, val);
975 return 0;
976 case IP_RECVERR:
977 inet_assign_bit(RECVERR, sk, val);
978 if (!val)
979 skb_errqueue_purge(&sk->sk_error_queue);
980 return 0;
981 case IP_RECVERR_RFC4884:
982 if (val < 0 || val > 1)
983 return -EINVAL;
984 inet_assign_bit(RECVERR_RFC4884, sk, val);
985 return 0;
986 case IP_FREEBIND:
987 if (optlen < 1)
988 return -EINVAL;
989 inet_assign_bit(FREEBIND, sk, val);
990 return 0;
991 case IP_HDRINCL:
992 if (sk->sk_type != SOCK_RAW)
993 return -ENOPROTOOPT;
994 inet_assign_bit(HDRINCL, sk, val);
995 return 0;
996 case IP_MULTICAST_LOOP:
997 if (optlen < 1)
998 return -EINVAL;
999 inet_assign_bit(MC_LOOP, sk, val);
1000 return 0;
1001 case IP_MULTICAST_ALL:
1002 if (optlen < 1)
1003 return -EINVAL;
1004 if (val != 0 && val != 1)
1005 return -EINVAL;
1006 inet_assign_bit(MC_ALL, sk, val);
1007 return 0;
1008 case IP_TRANSPARENT:
1009 if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1010 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1011 return -EPERM;
1012 if (optlen < 1)
1013 return -EINVAL;
1014 inet_assign_bit(TRANSPARENT, sk, val);
1015 return 0;
1016 case IP_NODEFRAG:
1017 if (sk->sk_type != SOCK_RAW)
1018 return -ENOPROTOOPT;
1019 inet_assign_bit(NODEFRAG, sk, val);
1020 return 0;
1021 case IP_BIND_ADDRESS_NO_PORT:
1022 inet_assign_bit(BIND_ADDRESS_NO_PORT, sk, val);
1023 return 0;
1024 case IP_TTL:
1025 if (optlen < 1)
1026 return -EINVAL;
1027 if (val != -1 && (val < 1 || val > 255))
1028 return -EINVAL;
1029 WRITE_ONCE(inet->uc_ttl, val);
1030 return 0;
1031 case IP_MINTTL:
1032 if (optlen < 1)
1033 return -EINVAL;
1034 if (val < 0 || val > 255)
1035 return -EINVAL;
1036
1037 if (val)
1038 static_branch_enable(&ip4_min_ttl);
1039
1040 WRITE_ONCE(inet->min_ttl, val);
1041 return 0;
1042 }
1043
1044 err = 0;
1045 if (needs_rtnl)
1046 rtnl_lock();
1047 sockopt_lock_sock(sk);
1048
1049 switch (optname) {
1050 case IP_OPTIONS:
1051 {
1052 struct ip_options_rcu *old, *opt = NULL;
1053
1054 if (optlen > 40)
1055 goto e_inval;
1056 err = ip_options_get(sock_net(sk), &opt, optval, optlen);
1057 if (err)
1058 break;
1059 old = rcu_dereference_protected(inet->inet_opt,
1060 lockdep_sock_is_held(sk));
1061 if (inet_test_bit(IS_ICSK, sk)) {
1062 struct inet_connection_sock *icsk = inet_csk(sk);
1063 #if IS_ENABLED(CONFIG_IPV6)
1064 if (sk->sk_family == PF_INET ||
1065 (!((1 << sk->sk_state) &
1066 (TCPF_LISTEN | TCPF_CLOSE)) &&
1067 inet->inet_daddr != LOOPBACK4_IPV6)) {
1068 #endif
1069 if (old)
1070 icsk->icsk_ext_hdr_len -= old->opt.optlen;
1071 if (opt)
1072 icsk->icsk_ext_hdr_len += opt->opt.optlen;
1073 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
1074 #if IS_ENABLED(CONFIG_IPV6)
1075 }
1076 #endif
1077 }
1078 rcu_assign_pointer(inet->inet_opt, opt);
1079 if (old)
1080 kfree_rcu(old, rcu);
1081 break;
1082 }
1083 case IP_CHECKSUM:
1084 if (val) {
1085 if (!(inet_test_bit(CHECKSUM, sk))) {
1086 inet_inc_convert_csum(sk);
1087 inet_set_bit(CHECKSUM, sk);
1088 }
1089 } else {
1090 if (inet_test_bit(CHECKSUM, sk)) {
1091 inet_dec_convert_csum(sk);
1092 inet_clear_bit(CHECKSUM, sk);
1093 }
1094 }
1095 break;
1096 case IP_TOS: /* This sets both TOS and Precedence */
1097 __ip_sock_set_tos(sk, val);
1098 break;
1099 case IP_MTU_DISCOVER:
1100 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
1101 goto e_inval;
1102 inet->pmtudisc = val;
1103 break;
1104 case IP_MULTICAST_TTL:
1105 if (sk->sk_type == SOCK_STREAM)
1106 goto e_inval;
1107 if (optlen < 1)
1108 goto e_inval;
1109 if (val == -1)
1110 val = 1;
1111 if (val < 0 || val > 255)
1112 goto e_inval;
1113 inet->mc_ttl = val;
1114 break;
1115 case IP_UNICAST_IF:
1116 {
1117 struct net_device *dev = NULL;
1118 int ifindex;
1119 int midx;
1120
1121 if (optlen != sizeof(int))
1122 goto e_inval;
1123
1124 ifindex = (__force int)ntohl((__force __be32)val);
1125 if (ifindex == 0) {
1126 inet->uc_index = 0;
1127 err = 0;
1128 break;
1129 }
1130
1131 dev = dev_get_by_index(sock_net(sk), ifindex);
1132 err = -EADDRNOTAVAIL;
1133 if (!dev)
1134 break;
1135
1136 midx = l3mdev_master_ifindex(dev);
1137 dev_put(dev);
1138
1139 err = -EINVAL;
1140 if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
1141 break;
1142
1143 inet->uc_index = ifindex;
1144 err = 0;
1145 break;
1146 }
1147 case IP_MULTICAST_IF:
1148 {
1149 struct ip_mreqn mreq;
1150 struct net_device *dev = NULL;
1151 int midx;
1152
1153 if (sk->sk_type == SOCK_STREAM)
1154 goto e_inval;
1155 /*
1156 * Check the arguments are allowable
1157 */
1158
1159 if (optlen < sizeof(struct in_addr))
1160 goto e_inval;
1161
1162 err = -EFAULT;
1163 if (optlen >= sizeof(struct ip_mreqn)) {
1164 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1165 break;
1166 } else {
1167 memset(&mreq, 0, sizeof(mreq));
1168 if (optlen >= sizeof(struct ip_mreq)) {
1169 if (copy_from_sockptr(&mreq, optval,
1170 sizeof(struct ip_mreq)))
1171 break;
1172 } else if (optlen >= sizeof(struct in_addr)) {
1173 if (copy_from_sockptr(&mreq.imr_address, optval,
1174 sizeof(struct in_addr)))
1175 break;
1176 }
1177 }
1178
1179 if (!mreq.imr_ifindex) {
1180 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
1181 inet->mc_index = 0;
1182 inet->mc_addr = 0;
1183 err = 0;
1184 break;
1185 }
1186 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
1187 if (dev)
1188 mreq.imr_ifindex = dev->ifindex;
1189 } else
1190 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
1191
1192
1193 err = -EADDRNOTAVAIL;
1194 if (!dev)
1195 break;
1196
1197 midx = l3mdev_master_ifindex(dev);
1198
1199 dev_put(dev);
1200
1201 err = -EINVAL;
1202 if (sk->sk_bound_dev_if &&
1203 mreq.imr_ifindex != sk->sk_bound_dev_if &&
1204 midx != sk->sk_bound_dev_if)
1205 break;
1206
1207 inet->mc_index = mreq.imr_ifindex;
1208 inet->mc_addr = mreq.imr_address.s_addr;
1209 err = 0;
1210 break;
1211 }
1212
1213 case IP_ADD_MEMBERSHIP:
1214 case IP_DROP_MEMBERSHIP:
1215 {
1216 struct ip_mreqn mreq;
1217
1218 err = -EPROTO;
1219 if (inet_test_bit(IS_ICSK, sk))
1220 break;
1221
1222 if (optlen < sizeof(struct ip_mreq))
1223 goto e_inval;
1224 err = -EFAULT;
1225 if (optlen >= sizeof(struct ip_mreqn)) {
1226 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1227 break;
1228 } else {
1229 memset(&mreq, 0, sizeof(mreq));
1230 if (copy_from_sockptr(&mreq, optval,
1231 sizeof(struct ip_mreq)))
1232 break;
1233 }
1234
1235 if (optname == IP_ADD_MEMBERSHIP)
1236 err = ip_mc_join_group(sk, &mreq);
1237 else
1238 err = ip_mc_leave_group(sk, &mreq);
1239 break;
1240 }
1241 case IP_MSFILTER:
1242 {
1243 struct ip_msfilter *msf;
1244
1245 if (optlen < IP_MSFILTER_SIZE(0))
1246 goto e_inval;
1247 if (optlen > READ_ONCE(sysctl_optmem_max)) {
1248 err = -ENOBUFS;
1249 break;
1250 }
1251 msf = memdup_sockptr(optval, optlen);
1252 if (IS_ERR(msf)) {
1253 err = PTR_ERR(msf);
1254 break;
1255 }
1256 /* numsrc >= (1G-4) overflow in 32 bits */
1257 if (msf->imsf_numsrc >= 0x3ffffffcU ||
1258 msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
1259 kfree(msf);
1260 err = -ENOBUFS;
1261 break;
1262 }
1263 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
1264 kfree(msf);
1265 err = -EINVAL;
1266 break;
1267 }
1268 err = ip_mc_msfilter(sk, msf, 0);
1269 kfree(msf);
1270 break;
1271 }
1272 case IP_BLOCK_SOURCE:
1273 case IP_UNBLOCK_SOURCE:
1274 case IP_ADD_SOURCE_MEMBERSHIP:
1275 case IP_DROP_SOURCE_MEMBERSHIP:
1276 {
1277 struct ip_mreq_source mreqs;
1278 int omode, add;
1279
1280 if (optlen != sizeof(struct ip_mreq_source))
1281 goto e_inval;
1282 if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
1283 err = -EFAULT;
1284 break;
1285 }
1286 if (optname == IP_BLOCK_SOURCE) {
1287 omode = MCAST_EXCLUDE;
1288 add = 1;
1289 } else if (optname == IP_UNBLOCK_SOURCE) {
1290 omode = MCAST_EXCLUDE;
1291 add = 0;
1292 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
1293 struct ip_mreqn mreq;
1294
1295 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
1296 mreq.imr_address.s_addr = mreqs.imr_interface;
1297 mreq.imr_ifindex = 0;
1298 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1299 if (err && err != -EADDRINUSE)
1300 break;
1301 omode = MCAST_INCLUDE;
1302 add = 1;
1303 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
1304 omode = MCAST_INCLUDE;
1305 add = 0;
1306 }
1307 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1308 break;
1309 }
1310 case MCAST_JOIN_GROUP:
1311 case MCAST_LEAVE_GROUP:
1312 if (in_compat_syscall())
1313 err = compat_ip_mcast_join_leave(sk, optname, optval,
1314 optlen);
1315 else
1316 err = ip_mcast_join_leave(sk, optname, optval, optlen);
1317 break;
1318 case MCAST_JOIN_SOURCE_GROUP:
1319 case MCAST_LEAVE_SOURCE_GROUP:
1320 case MCAST_BLOCK_SOURCE:
1321 case MCAST_UNBLOCK_SOURCE:
1322 err = do_mcast_group_source(sk, optname, optval, optlen);
1323 break;
1324 case MCAST_MSFILTER:
1325 if (in_compat_syscall())
1326 err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
1327 else
1328 err = ip_set_mcast_msfilter(sk, optval, optlen);
1329 break;
1330 case IP_IPSEC_POLICY:
1331 case IP_XFRM_POLICY:
1332 err = -EPERM;
1333 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1334 break;
1335 err = xfrm_user_policy(sk, optname, optval, optlen);
1336 break;
1337
1338 case IP_LOCAL_PORT_RANGE:
1339 {
1340 const __u16 lo = val;
1341 const __u16 hi = val >> 16;
1342
1343 if (optlen != sizeof(__u32))
1344 goto e_inval;
1345 if (lo != 0 && hi != 0 && lo > hi)
1346 goto e_inval;
1347
1348 inet->local_port_range.lo = lo;
1349 inet->local_port_range.hi = hi;
1350 break;
1351 }
1352 default:
1353 err = -ENOPROTOOPT;
1354 break;
1355 }
1356 sockopt_release_sock(sk);
1357 if (needs_rtnl)
1358 rtnl_unlock();
1359 return err;
1360
1361 e_inval:
1362 sockopt_release_sock(sk);
1363 if (needs_rtnl)
1364 rtnl_unlock();
1365 return -EINVAL;
1366 }
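/*
 * Example (userspace, illustrative sketch only): IP_LOCAL_PORT_RANGE takes
 * a single 32-bit value with the low bound in bits 0-15 and the high bound
 * in bits 16-31, matching the unpacking in do_ip_setsockopt() above. The
 * option number is assumed to come from <linux/in.h> (51 at the time of
 * writing); older libc headers may not define it.
 *
 *	#include <stdint.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	#ifndef IP_LOCAL_PORT_RANGE
 *	#define IP_LOCAL_PORT_RANGE 51
 *	#endif
 *
 *	int set_local_port_range(int fd, uint16_t lo, uint16_t hi)
 *	{
 *		uint32_t range = ((uint32_t)hi << 16) | lo;
 *
 *		return setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
 *				  &range, sizeof(range));
 *	}
 */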
1367
1368 /**
1369 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1370 * @sk: socket
1371 * @skb: buffer
1372 * @drop_dst: if true, drops skb dst
1373 *
 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
 * destination in skb->cb[] before the dst is dropped.
 * This way, the receiver doesn't take cache line misses to read the rtable.
1377 */
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
1379 {
1380 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1381 bool prepare = inet_test_bit(PKTINFO, sk) ||
1382 ipv6_sk_rxinfo(sk);
1383
1384 if (prepare && skb_rtable(skb)) {
1385 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1386 * which has interface index (iif) as the first member of the
1387 * underlying inet{6}_skb_parm struct. This code then overlays
1388 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1389 * element so the iif is picked up from the prior IPCB. If iif
1390 * is the loopback interface, then return the sending interface
1391 * (e.g., process binds socket to eth0 for Tx which is
1392 * redirected to loopback in the rtable/dst).
1393 */
1394 struct rtable *rt = skb_rtable(skb);
1395 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1396
1397 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1398 pktinfo->ipi_ifindex = inet_iif(skb);
1399 else if (l3slave && rt && rt->rt_iif)
1400 pktinfo->ipi_ifindex = rt->rt_iif;
1401
1402 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1403 } else {
1404 pktinfo->ipi_ifindex = 0;
1405 pktinfo->ipi_spec_dst.s_addr = 0;
1406 }
1407 if (drop_dst)
1408 skb_dst_drop(skb);
1409 }
1410
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1412 unsigned int optlen)
1413 {
1414 int err;
1415
1416 if (level != SOL_IP)
1417 return -ENOPROTOOPT;
1418
1419 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1420 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1421 if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
1422 optname < BPFILTER_IPT_SET_MAX)
1423 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
1424 #endif
1425 #ifdef CONFIG_NETFILTER
1426 /* we need to exclude all possible ENOPROTOOPTs except default case */
1427 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1428 optname != IP_IPSEC_POLICY &&
1429 optname != IP_XFRM_POLICY &&
1430 !ip_mroute_opt(optname))
1431 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1432 #endif
1433 return err;
1434 }
1435 EXPORT_SYMBOL(ip_setsockopt);
1436
1437 /*
1438 * Get the options. Note for future reference. The GET of IP options gets
1439 * the _received_ ones. The set sets the _sent_ ones.
1440 */
1441
static bool getsockopt_needs_rtnl(int optname)
1443 {
1444 switch (optname) {
1445 case IP_MSFILTER:
1446 case MCAST_MSFILTER:
1447 return true;
1448 }
1449 return false;
1450 }
1451
static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1453 sockptr_t optlen, int len)
1454 {
1455 const int size0 = offsetof(struct group_filter, gf_slist_flex);
1456 struct group_filter gsf;
1457 int num, gsf_size;
1458 int err;
1459
1460 if (len < size0)
1461 return -EINVAL;
1462 if (copy_from_sockptr(&gsf, optval, size0))
1463 return -EFAULT;
1464
1465 num = gsf.gf_numsrc;
1466 err = ip_mc_gsfget(sk, &gsf, optval,
1467 offsetof(struct group_filter, gf_slist_flex));
1468 if (err)
1469 return err;
1470 if (gsf.gf_numsrc < num)
1471 num = gsf.gf_numsrc;
1472 gsf_size = GROUP_FILTER_SIZE(num);
1473 if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) ||
1474 copy_to_sockptr(optval, &gsf, size0))
1475 return -EFAULT;
1476 return 0;
1477 }
1478
static int compat_ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1480 sockptr_t optlen, int len)
1481 {
1482 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
1483 struct compat_group_filter gf32;
1484 struct group_filter gf;
1485 int num;
1486 int err;
1487
1488 if (len < size0)
1489 return -EINVAL;
1490 if (copy_from_sockptr(&gf32, optval, size0))
1491 return -EFAULT;
1492
1493 gf.gf_interface = gf32.gf_interface;
1494 gf.gf_fmode = gf32.gf_fmode;
1495 num = gf.gf_numsrc = gf32.gf_numsrc;
1496 gf.gf_group = gf32.gf_group;
1497
1498 err = ip_mc_gsfget(sk, &gf, optval,
1499 offsetof(struct compat_group_filter, gf_slist_flex));
1500 if (err)
1501 return err;
1502 if (gf.gf_numsrc < num)
1503 num = gf.gf_numsrc;
1504 len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
1505 if (copy_to_sockptr(optlen, &len, sizeof(int)) ||
1506 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode),
1507 &gf.gf_fmode, sizeof(gf.gf_fmode)) ||
1508 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc),
1509 &gf.gf_numsrc, sizeof(gf.gf_numsrc)))
1510 return -EFAULT;
1511 return 0;
1512 }
1513
int do_ip_getsockopt(struct sock *sk, int level, int optname,
1515 sockptr_t optval, sockptr_t optlen)
1516 {
1517 struct inet_sock *inet = inet_sk(sk);
1518 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1519 int val, err = 0;
1520 int len;
1521
1522 if (level != SOL_IP)
1523 return -EOPNOTSUPP;
1524
1525 if (ip_mroute_opt(optname))
1526 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1527
1528 if (copy_from_sockptr(&len, optlen, sizeof(int)))
1529 return -EFAULT;
1530 if (len < 0)
1531 return -EINVAL;
1532
1533 /* Handle options that can be read without locking the socket. */
1534 switch (optname) {
1535 case IP_PKTINFO:
1536 val = inet_test_bit(PKTINFO, sk);
1537 goto copyval;
1538 case IP_RECVTTL:
1539 val = inet_test_bit(TTL, sk);
1540 goto copyval;
1541 case IP_RECVTOS:
1542 val = inet_test_bit(TOS, sk);
1543 goto copyval;
1544 case IP_RECVOPTS:
1545 val = inet_test_bit(RECVOPTS, sk);
1546 goto copyval;
1547 case IP_RETOPTS:
1548 val = inet_test_bit(RETOPTS, sk);
1549 goto copyval;
1550 case IP_PASSSEC:
1551 val = inet_test_bit(PASSSEC, sk);
1552 goto copyval;
1553 case IP_RECVORIGDSTADDR:
1554 val = inet_test_bit(ORIGDSTADDR, sk);
1555 goto copyval;
1556 case IP_CHECKSUM:
1557 val = inet_test_bit(CHECKSUM, sk);
1558 goto copyval;
1559 case IP_RECVFRAGSIZE:
1560 val = inet_test_bit(RECVFRAGSIZE, sk);
1561 goto copyval;
1562 case IP_RECVERR:
1563 val = inet_test_bit(RECVERR, sk);
1564 goto copyval;
1565 case IP_RECVERR_RFC4884:
1566 val = inet_test_bit(RECVERR_RFC4884, sk);
1567 goto copyval;
1568 case IP_FREEBIND:
1569 val = inet_test_bit(FREEBIND, sk);
1570 goto copyval;
1571 case IP_HDRINCL:
1572 val = inet_test_bit(HDRINCL, sk);
1573 goto copyval;
1574 case IP_MULTICAST_LOOP:
1575 val = inet_test_bit(MC_LOOP, sk);
1576 goto copyval;
1577 case IP_MULTICAST_ALL:
1578 val = inet_test_bit(MC_ALL, sk);
1579 goto copyval;
1580 case IP_TRANSPARENT:
1581 val = inet_test_bit(TRANSPARENT, sk);
1582 goto copyval;
1583 case IP_NODEFRAG:
1584 val = inet_test_bit(NODEFRAG, sk);
1585 goto copyval;
1586 case IP_BIND_ADDRESS_NO_PORT:
1587 val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
1588 goto copyval;
1589 case IP_TTL:
1590 val = READ_ONCE(inet->uc_ttl);
1591 if (val < 0)
1592 val = READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_default_ttl);
1593 goto copyval;
1594 case IP_MINTTL:
1595 val = READ_ONCE(inet->min_ttl);
1596 goto copyval;
1597 }
1598
1599 if (needs_rtnl)
1600 rtnl_lock();
1601 sockopt_lock_sock(sk);
1602
1603 switch (optname) {
1604 case IP_OPTIONS:
1605 {
1606 unsigned char optbuf[sizeof(struct ip_options)+40];
1607 struct ip_options *opt = (struct ip_options *)optbuf;
1608 struct ip_options_rcu *inet_opt;
1609
1610 inet_opt = rcu_dereference_protected(inet->inet_opt,
1611 lockdep_sock_is_held(sk));
1612 opt->optlen = 0;
1613 if (inet_opt)
1614 memcpy(optbuf, &inet_opt->opt,
1615 sizeof(struct ip_options) +
1616 inet_opt->opt.optlen);
1617 sockopt_release_sock(sk);
1618
1619 if (opt->optlen == 0) {
1620 len = 0;
1621 return copy_to_sockptr(optlen, &len, sizeof(int));
1622 }
1623
1624 ip_options_undo(opt);
1625
1626 len = min_t(unsigned int, len, opt->optlen);
1627 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1628 return -EFAULT;
1629 if (copy_to_sockptr(optval, opt->__data, len))
1630 return -EFAULT;
1631 return 0;
1632 }
1633 case IP_TOS:
1634 val = inet->tos;
1635 break;
1636 case IP_MTU_DISCOVER:
1637 val = inet->pmtudisc;
1638 break;
1639 case IP_MTU:
1640 {
1641 struct dst_entry *dst;
1642 val = 0;
1643 dst = sk_dst_get(sk);
1644 if (dst) {
1645 val = dst_mtu(dst);
1646 dst_release(dst);
1647 }
1648 if (!val) {
1649 sockopt_release_sock(sk);
1650 return -ENOTCONN;
1651 }
1652 break;
1653 }
1654 case IP_MULTICAST_TTL:
1655 val = inet->mc_ttl;
1656 break;
1657 case IP_UNICAST_IF:
1658 val = (__force int)htonl((__u32) inet->uc_index);
1659 break;
1660 case IP_MULTICAST_IF:
1661 {
1662 struct in_addr addr;
1663 len = min_t(unsigned int, len, sizeof(struct in_addr));
1664 addr.s_addr = inet->mc_addr;
1665 sockopt_release_sock(sk);
1666
1667 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1668 return -EFAULT;
1669 if (copy_to_sockptr(optval, &addr, len))
1670 return -EFAULT;
1671 return 0;
1672 }
1673 case IP_MSFILTER:
1674 {
1675 struct ip_msfilter msf;
1676
1677 if (len < IP_MSFILTER_SIZE(0)) {
1678 err = -EINVAL;
1679 goto out;
1680 }
1681 if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) {
1682 err = -EFAULT;
1683 goto out;
1684 }
1685 err = ip_mc_msfget(sk, &msf, optval, optlen);
1686 goto out;
1687 }
1688 case MCAST_MSFILTER:
1689 if (in_compat_syscall())
1690 err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
1691 len);
1692 else
1693 err = ip_get_mcast_msfilter(sk, optval, optlen, len);
1694 goto out;
1695 case IP_PKTOPTIONS:
1696 {
1697 struct msghdr msg;
1698
1699 sockopt_release_sock(sk);
1700
1701 if (sk->sk_type != SOCK_STREAM)
1702 return -ENOPROTOOPT;
1703
1704 if (optval.is_kernel) {
1705 msg.msg_control_is_user = false;
1706 msg.msg_control = optval.kernel;
1707 } else {
1708 msg.msg_control_is_user = true;
1709 msg.msg_control_user = optval.user;
1710 }
1711 msg.msg_controllen = len;
1712 msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
1713
1714 if (inet_test_bit(PKTINFO, sk)) {
1715 struct in_pktinfo info;
1716
1717 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1718 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1719 info.ipi_ifindex = inet->mc_index;
1720 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1721 }
1722 if (inet_test_bit(TTL, sk)) {
1723 int hlim = inet->mc_ttl;
1724 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1725 }
1726 if (inet_test_bit(TOS, sk)) {
1727 int tos = inet->rcv_tos;
1728 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1729 }
1730 len -= msg.msg_controllen;
1731 return copy_to_sockptr(optlen, &len, sizeof(int));
1732 }
1733 case IP_LOCAL_PORT_RANGE:
1734 val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
1735 break;
1736 case IP_PROTOCOL:
1737 val = inet_sk(sk)->inet_num;
1738 break;
1739 default:
1740 sockopt_release_sock(sk);
1741 return -ENOPROTOOPT;
1742 }
1743 sockopt_release_sock(sk);
1744 copyval:
1745 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1746 unsigned char ucval = (unsigned char)val;
1747 len = 1;
1748 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1749 return -EFAULT;
1750 if (copy_to_sockptr(optval, &ucval, 1))
1751 return -EFAULT;
1752 } else {
1753 len = min_t(unsigned int, sizeof(int), len);
1754 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1755 return -EFAULT;
1756 if (copy_to_sockptr(optval, &val, len))
1757 return -EFAULT;
1758 }
1759 return 0;
1760
1761 out:
1762 sockopt_release_sock(sk);
1763 if (needs_rtnl)
1764 rtnl_unlock();
1765 return err;
1766 }
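/*
 * Example (userspace, illustrative sketch only): IP_MTU is read-only and
 * only meaningful once the socket has a cached route; as seen above,
 * do_ip_getsockopt() returns -ENOTCONN otherwise.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	int get_path_mtu(int connected_fd)
 *	{
 *		int mtu = 0;
 *		socklen_t len = sizeof(mtu);
 *
 *		if (getsockopt(connected_fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
 *			return -1;
 *		return mtu;
 *	}
 */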
1767
int ip_getsockopt(struct sock *sk, int level,
1769 int optname, char __user *optval, int __user *optlen)
1770 {
1771 int err;
1772
1773 err = do_ip_getsockopt(sk, level, optname,
1774 USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
1775
1776 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1777 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1778 optname < BPFILTER_IPT_GET_MAX)
1779 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
1780 #endif
1781 #ifdef CONFIG_NETFILTER
1782 /* we need to exclude all possible ENOPROTOOPTs except default case */
1783 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1784 !ip_mroute_opt(optname)) {
1785 int len;
1786
1787 if (get_user(len, optlen))
1788 return -EFAULT;
1789
1790 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1791 if (err >= 0)
1792 err = put_user(len, optlen);
1793 return err;
1794 }
1795 #endif
1796 return err;
1797 }
1798 EXPORT_SYMBOL(ip_getsockopt);
1799