/*
 * RAW sockets for IPv6
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <pedro_m@yahoo.com>
 *
 * Adapted from linux/net/ipv4/raw.c
 *
 * $Id: raw.c,v 1.50.2.1 2002/03/05 12:47:34 davem Exp $
 *
 * Fixes:
 * Hideaki YOSHIFUJI : sin6_scope_id support
 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>

#include <net/rawv6.h>

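/*
 * Hash table of bound raw sockets, keyed by the low bits of the
 * protocol (nexthdr) number and protected by raw_v6_lock.
 */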
struct sock *raw_v6_htable[RAWV6_HTABLE_SIZE];
rwlock_t raw_v6_lock = RW_LOCK_UNLOCKED;

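/* Add a socket to the chain for its protocol number and take a reference. */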
static void raw_v6_hash(struct sock *sk)
{
	struct sock **skp = &raw_v6_htable[sk->num & (RAWV6_HTABLE_SIZE - 1)];

	write_lock_bh(&raw_v6_lock);
	if ((sk->next = *skp) != NULL)
		(*skp)->pprev = &sk->next;
	*skp = sk;
	sk->pprev = skp;
	sock_prot_inc_use(sk->prot);
	sock_hold(sk);
	write_unlock_bh(&raw_v6_lock);
}

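/* Remove a socket from its hash chain and drop the reference taken by raw_v6_hash(). */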
static void raw_v6_unhash(struct sock *sk)
{
	write_lock_bh(&raw_v6_lock);
	if (sk->pprev) {
		if (sk->next)
			sk->next->pprev = sk->pprev;
		*sk->pprev = sk->next;
		sk->pprev = NULL;
		sock_prot_dec_use(sk->prot);
		__sock_put(sk);
	}
	write_unlock_bh(&raw_v6_lock);
}


/* Grumble... icmp and ip_input want to get at this... */
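/*
 * Walk a hash chain starting at sk and return the first socket bound to
 * protocol 'num' whose local/remote address constraints (including
 * multicast membership) match the packet.
 */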
struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
			     struct in6_addr *loc_addr, struct in6_addr *rmt_addr)
{
	struct sock *s = sk;
	int addr_type = ipv6_addr_type(loc_addr);

	for (s = sk; s; s = s->next) {
		if (s->num == num) {
			struct ipv6_pinfo *np = &s->net_pinfo.af_inet6;

			if (!ipv6_addr_any(&np->daddr) &&
			    ipv6_addr_cmp(&np->daddr, rmt_addr))
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_cmp(&np->rcv_saddr, loc_addr) == 0)
					break;
				if ((addr_type & IPV6_ADDR_MULTICAST) &&
				    inet6_mc_check(s, loc_addr, rmt_addr))
					break;
				continue;
			}
			break;
		}
	}
	return s;
}

/*
 * 0 - deliver
 * 1 - block
 */
static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
{
	struct icmp6hdr *icmph;
	struct raw6_opt *opt;

	opt = &sk->tp_pinfo.tp_raw;
	if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
		__u32 *data = &opt->filter.data[0];
		int bit_nr;

		icmph = (struct icmp6hdr *) skb->data;
		bit_nr = icmph->icmp6_type;

		return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
	}
	return 0;
}

/*
 * demultiplex raw sockets.
 * (should consider queueing the skb in the sock receive_queue
 * without calling rawv6.c)
 */
struct sock *ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	struct in6_addr *saddr;
	struct in6_addr *daddr;
	struct sock *sk, *sk2;
	__u8 hash;

	saddr = &skb->nh.ipv6h->saddr;
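	/* daddr immediately follows saddr in struct ipv6hdr */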
	daddr = saddr + 1;

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	read_lock(&raw_v6_lock);
	sk = raw_v6_htable[hash];

	/*
	 * The first socket found will be delivered after
	 * delivery to transport protocols.
	 */

	if (sk == NULL)
		goto out;

	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr);

	if (sk) {
		sk2 = sk;

		while ((sk2 = __raw_v6_lookup(sk2->next, nexthdr, daddr, saddr))) {
			struct sk_buff *buff;

			if (nexthdr == IPPROTO_ICMPV6 &&
			    icmpv6_filter(sk2, skb))
				continue;

			buff = skb_clone(skb, GFP_ATOMIC);
			if (buff)
				rawv6_rcv(sk2, buff);
		}
	}

	if (sk && nexthdr == IPPROTO_ICMPV6 && icmpv6_filter(sk, skb))
		sk = NULL;

out:
	if (sk)
		sock_hold(sk);
	read_unlock(&raw_v6_lock);
	return sk;
}

/* This cleans up af_inet6 a bit. -DaveM */
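/*
 * Bind a raw socket to a local IPv6 address; a link-local address
 * additionally requires a bound interface (sin6_scope_id).
 */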
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__u32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->state != TCP_CLOSE)
		goto out;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    addr->sin6_scope_id) {
			/* Override any existing binding, if another one
			 * is supplied by user.
			 */
			sk->bound_dev_if = addr->sin6_scope_id;
		}

		/* Binding to a link-local address requires an interface */
		if (sk->bound_dev_if == 0)
			goto out;
	}

	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		/* The IPv4 address of the socket is invalid: only the
		 * unspecified and mapped addresses have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(&addr->sin6_addr, NULL))
				goto out;
		}
	}

	sk->rcv_saddr = v4addr;
	sk->saddr = v4addr;
	ipv6_addr_copy(&sk->net_pinfo.af_inet6.rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&sk->net_pinfo.af_inet6.saddr, &addr->sin6_addr);
	err = 0;
out:
	release_sock(sk);
	return err;
}

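/*
 * Pass an ICMPv6 error up to the socket: queue it on the error queue
 * when IPV6_RECVERR is enabled, and report hard errors on connected
 * sockets via sk->err.
 */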
void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       int type, int code, int offset, u32 info)
{
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
	 */
	if (!sk->net_pinfo.af_inet6.recverr && sk->state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG)
		harderr = (sk->net_pinfo.af_inet6.pmtudisc == IPV6_PMTUDISC_DO);

	if (sk->net_pinfo.af_inet6.recverr) {
		u8 *payload = skb->data;
		if (!sk->protinfo.af_inet.hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (sk->net_pinfo.af_inet6.recverr || harderr) {
		sk->err = err;
		sk->error_report(sk);
	}
}

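/*
 * Verify the checksum if it is still pending (or a socket filter is
 * attached) and queue the skb on the socket's receive queue.
 */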
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((sk->tp_pinfo.tp_raw.checksum
#if defined(CONFIG_FILTER)
	     || sk->filter
#endif
	    ) && skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			IP6_INC_STATS_BH(Ip6InDiscards);
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		IP6_INC_STATS_BH(Ip6InDiscards);
		kfree_skb(skb);
		return 0;
	}

	IP6_INC_STATS_BH(Ip6InDelivers);
	return 0;
}

/*
 * This is next to useless...
 * if we demultiplex in the network layer we don't need the extra call
 * just to queue the skb...
 * maybe we could have the network layer decide upon a hint whether it
 * should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->tp_pinfo.tp_raw.checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (skb->ip_summed == CHECKSUM_HW) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
					    &skb->nh.ipv6h->daddr,
					    skb->len, sk->num, skb->csum)) {
				NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "raw v6 hw csum failure.\n"));
				skb->ip_summed = CHECKSUM_NONE;
			}
		}
		if (skb->ip_summed == CHECKSUM_NONE)
			skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
						     &skb->nh.ipv6h->daddr,
						     skb->len, sk->num, 0);
	}

	if (sk->protinfo.af_inet.hdrincl) {
		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
		    (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			IP6_INC_STATS_BH(Ip6InDiscards);
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}


/*
 * This should be easy: if there is something there
 * we return it, otherwise we block.
 */
int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
		  int noblock, int flags, int *addr_len)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
	struct sk_buff *skb;
	int copied, err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (addr_len)
		*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags & MSG_TRUNC) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
		       sizeof(struct in6_addr));
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
			struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
			sin6->sin6_scope_id = opt->iif;
		}
	}

	sock_recv_timestamp(msg, sk, skb);

	if (sk->net_pinfo.af_inet6.rxopt.all)
		datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear the queue. */
	if (flags & MSG_PEEK) {
		int clear = 0;
		spin_lock_irq(&sk->receive_queue.lock);
		if (skb == skb_peek(&sk->receive_queue)) {
			__skb_unlink(skb, &sk->receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* The error for the blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags & MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	IP6_INC_STATS_USER(Ip6InDiscards);
	goto out_free;
}

/*
 * Sending...
 */

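/*
 * State passed to rawv6_frag_cksum() so the checksum requested via
 * IPV6_CHECKSUM can be accumulated across fragments and written back
 * into the first fragment at the configured offset.
 */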
struct rawv6_fakehdr {
	struct iovec *iov;
	struct sock *sk;
	__u32 len;
	__u32 cksum;
	__u32 proto;
	struct in6_addr *daddr;
};

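/* Copy 'len' bytes of payload from the user iovec into the fragment buffer. */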
static int rawv6_getfrag(const void *data, struct in6_addr *saddr,
			 char *buff, unsigned int offset, unsigned int len)
{
	struct iovec *iov = (struct iovec *) data;

	return memcpy_fromiovecend(buff, iov, offset, len);
}

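/*
 * Like rawv6_getfrag(), but also accumulates the payload checksum and,
 * for the first fragment, folds in the pseudo-header and stores the
 * result at the checksum offset selected with IPV6_CHECKSUM.
 */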
static int rawv6_frag_cksum(const void *data, struct in6_addr *addr,
			    char *buff, unsigned int offset,
			    unsigned int len)
{
	struct rawv6_fakehdr *hdr = (struct rawv6_fakehdr *) data;

	if (csum_partial_copy_fromiovecend(buff, hdr->iov, offset,
					   len, &hdr->cksum))
		return -EFAULT;

	if (offset == 0) {
		struct sock *sk;
		struct raw6_opt *opt;
		struct in6_addr *daddr;

		sk = hdr->sk;
		opt = &sk->tp_pinfo.tp_raw;

		if (hdr->daddr)
			daddr = hdr->daddr;
		else
			daddr = addr + 1;

		hdr->cksum = csum_ipv6_magic(addr, daddr, hdr->len,
					     hdr->proto, hdr->cksum);

		if (opt->offset + 1 < len) {
			__u16 *csum;

			csum = (__u16 *) (buff + opt->offset);
			if (*csum) {
				/* in case cksum was not initialized */
				__u32 sum = hdr->cksum;
				sum += *csum;
				*csum = hdr->cksum = (sum + (sum >> 16));
			} else {
				*csum = hdr->cksum;
			}
		} else {
			if (net_ratelimit())
				printk(KERN_DEBUG "icmp: cksum offset too big\n");
			return -EINVAL;
		}
	}
	return 0;
}


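/*
 * Transmit path: resolve the destination (msg_name, flow label or the
 * connected address), merge ancillary data and stored options, then
 * hand the payload to ip6_build_xmit(), with checksum insertion when
 * IPV6_CHECKSUM is enabled.
 */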
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi fl;
	int addr_len = msg->msg_namelen;
	struct in6_addr *daddr;
	struct raw6_opt *raw_opt;
	int hlimit = -1;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow;
	   a better check is made in ip6_build_xmit.
	 */
	if (len < 0)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 * Get and verify the address.
	 */

	fl.fl6_flowlabel = 0;
	fl.oif = 0;

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EINVAL;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = sk->num;

		if (proto > 255)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/* Otherwise it will be difficult to maintain sk->dst_cache. */
		if (sk->state == TCP_ESTABLISHED &&
		    !ipv6_addr_cmp(daddr, &sk->net_pinfo.af_inet6.daddr))
			daddr = &sk->net_pinfo.af_inet6.daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl.oif = sin6->sin6_scope_id;
	} else {
		if (sk->state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = sk->num;
		daddr = &(sk->net_pinfo.af_inet6.daddr);
		fl.fl6_flowlabel = np->flow_label;
	}

	if (ipv6_addr_any(daddr)) {
		/*
		 * An unspecified destination address is
		 * treated as an error... is this correct?
		 */
		return -EINVAL;
	}

	if (fl.oif == 0)
		fl.oif = sk->bound_dev_if;
	fl.fl6_src = NULL;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));

		err = datagram_send_ctl(msg, &fl, opt, &hlimit);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);

	raw_opt = &sk->tp_pinfo.tp_raw;

	fl.proto = proto;
	fl.fl6_dst = daddr;
	if (fl.fl6_src == NULL && !ipv6_addr_any(&np->saddr))
		fl.fl6_src = &np->saddr;
	fl.uli_u.icmpt.type = 0;
	fl.uli_u.icmpt.code = 0;

	if (raw_opt->checksum) {
		struct rawv6_fakehdr hdr;

		hdr.iov = msg->msg_iov;
		hdr.sk = sk;
		hdr.len = len;
		hdr.cksum = 0;
		hdr.proto = proto;

		if (opt && opt->srcrt)
			hdr.daddr = daddr;
		else
			hdr.daddr = NULL;

		err = ip6_build_xmit(sk, rawv6_frag_cksum, &hdr, &fl, len,
				     opt, hlimit, msg->msg_flags);
	} else {
		err = ip6_build_xmit(sk, rawv6_getfrag, msg->msg_iov, &fl, len,
				     opt, hlimit, msg->msg_flags);
	}

	fl6_sock_release(flowlabel);

	return err < 0 ? err : len;
}

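/* SOL_ICMPV6: install the ICMPV6_FILTER type bitmap for this socket. */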
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
			       char *optval, int optlen)
{
	switch (optname) {
	case ICMPV6_FILTER:
		if (optlen > sizeof(struct icmp6_filter))
			optlen = sizeof(struct icmp6_filter);
		if (copy_from_user(&sk->tp_pinfo.tp_raw.filter, optval, optlen))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}

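/* SOL_ICMPV6: return the current ICMPV6_FILTER bitmap. */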
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
			       char *optval, int *optlen)
{
	int len;

	switch (optname) {
	case ICMPV6_FILTER:
		if (get_user(len, optlen))
			return -EFAULT;
		if (len < 0)
			return -EINVAL;
		if (len > sizeof(struct icmp6_filter))
			len = sizeof(struct icmp6_filter);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &sk->tp_pinfo.tp_raw.filter, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}


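/*
 * Socket options handled here: IPV6_CHECKSUM (at SOL_RAW or SOL_IPV6)
 * selects the offset at which the kernel computes and stores the
 * transport checksum; a negative value disables it.  Everything else
 * is delegated to ipv6_setsockopt() or the ICMPv6 filter helpers.
 *
 * Illustrative userspace use (not part of this file):
 *
 *	int offset = 2;
 *	setsockopt(fd, SOL_RAW, IPV6_CHECKSUM, &offset, sizeof(offset));
 */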
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			    char *optval, int optlen)
{
	struct raw6_opt *opt = &sk->tp_pinfo.tp_raw;
	int val;

	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (sk->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_setsockopt(sk, level, optname, optval,
				       optlen);
	}

	if (get_user(val, (int *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/* You may get strange results with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val & 1))
			return -EINVAL;
		if (val < 0) {
			opt->checksum = 0;
		} else {
			opt->checksum = 1;
			opt->offset = val;
		}

		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

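/* Mirror of rawv6_setsockopt(): IPV6_CHECKSUM reports the offset, or -1 when disabled. */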
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char *optval, int *optlen)
{
	struct raw6_opt *opt = &sk->tp_pinfo.tp_raw;
	int val, len;

	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (sk->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_getsockopt(sk, level, optname, optval,
				       optlen);
	}

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		if (opt->checksum == 0)
			val = -1;
		else
			val = opt->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

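/* SIOCOUTQ/SIOCINQ: report bytes queued for transmit / in the next queued datagram. */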
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->wmem_alloc);
		return put_user(amount, (int *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_irq(&sk->receive_queue.lock);
		skb = skb_peek(&sk->receive_queue);
		if (skb != NULL)
			amount = skb->tail - skb->h.raw;
		spin_unlock_irq(&sk->receive_queue.lock);
		return put_user(amount, (int *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}

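/* Release the socket; IPPROTO_RAW sockets also drop any router-alert registration. */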
static void rawv6_close(struct sock *sk, long timeout)
{
	if (sk->num == IPPROTO_RAW)
		ip6_ra_control(sk, -1, NULL);

	inet_sock_release(sk);
}

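/* ICMPv6 sockets always checksum; the ICMPv6 checksum lives at offset 2 of the header. */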
static int rawv6_init_sk(struct sock *sk)
{
	if (sk->num == IPPROTO_ICMPV6) {
		struct raw6_opt *opt = &sk->tp_pinfo.tp_raw;
		opt->checksum = 1;
		opt->offset = 2;
	}
	return 0;
}

#define LINE_LEN 190
#define LINE_FMT "%-190s\n"

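/* Format one socket as a /proc/net/raw6 line into tmpbuf. */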
static void get_raw6_sock(struct sock *sp, char *tmpbuf, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &sp->net_pinfo.af_inet6.daddr;
	src = &sp->net_pinfo.af_inet6.rcv_saddr;
	destp = 0;
	srcp = sp->num;
	sprintf(tmpbuf,
		"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		i,
		src->s6_addr32[0], src->s6_addr32[1],
		src->s6_addr32[2], src->s6_addr32[3], srcp,
		dest->s6_addr32[0], dest->s6_addr32[1],
		dest->s6_addr32[2], dest->s6_addr32[3], destp,
		sp->state,
		atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
		0, 0L, 0,
		sock_i_uid(sp), 0,
		sock_i_ino(sp),
		atomic_read(&sp->refcnt), sp);
}

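/* Read handler for /proc/net/raw6: a header line plus one line per hashed socket. */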
int raw6_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0, num = 0, i;
	off_t pos = 0;
	off_t begin;
	char tmpbuf[LINE_LEN + 2];

	if (offset < LINE_LEN + 1)
		len += sprintf(buffer, LINE_FMT,
			       "  sl  "						/* 6 */
			       "local_address                         "		/* 38 */
			       "remote_address                        "		/* 38 */
			       "st tx_queue rx_queue tr tm->when retrnsmt"	/* 41 */
			       "   uid  timeout inode");			/* 21 */
										/*----*/
										/*144 */
	pos = LINE_LEN + 1;
	read_lock(&raw_v6_lock);
	for (i = 0; i < RAWV6_HTABLE_SIZE; i++) {
		struct sock *sk;

		for (sk = raw_v6_htable[i]; sk; sk = sk->next, num++) {
			if (sk->family != PF_INET6)
				continue;
			pos += LINE_LEN + 1;
			if (pos <= offset)
				continue;
			get_raw6_sock(sk, tmpbuf, i);
			len += sprintf(buffer + len, LINE_FMT, tmpbuf);
			if (len >= length)
				goto out;
		}
	}
out:
	read_unlock(&raw_v6_lock);
	begin = len - (pos - offset);
	*start = buffer + begin;
	len -= begin;
	if (len > length)
		len = length;
	if (len < 0)
		len = 0;
	return len;
}

struct proto rawv6_prot = {
	name:		"RAW",
	close:		rawv6_close,
	connect:	udpv6_connect,
	disconnect:	udp_disconnect,
	ioctl:		rawv6_ioctl,
	init:		rawv6_init_sk,
	destroy:	inet6_destroy_sock,
	setsockopt:	rawv6_setsockopt,
	getsockopt:	rawv6_getsockopt,
	sendmsg:	rawv6_sendmsg,
	recvmsg:	rawv6_recvmsg,
	bind:		rawv6_bind,
	backlog_rcv:	rawv6_rcv_skb,
	hash:		raw_v6_hash,
	unhash:		raw_v6_unhash,
};