// SPDX-License-Identifier: GPL-2.0
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff    :       Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/indirect_call_wrapper.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(queue->prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@queue: socket queue from which to receive
 *	@flags: MSG\_ flags
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function does not lock the socket; the caller owns the
 *	returned skb and must eventually free it (usually by calling
 *	skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock the socket these days. This function is
 *	* free of race conditions, and this measure can significantly
 *	* improve datagram socket latencies at high loads, when data
 *	* copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change it without having
 *	the standard around, please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again, only user level code calls this function, so nothing
		 * at interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
						last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(queue->prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, sk_queue, err,
					      &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
				   &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
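
/*
 * Example (illustrative sketch only, not used by this file): a typical
 * protocol recvmsg() pairs skb_recv_datagram() with skb_free_datagram().
 * The helper name example_recvmsg and the minimal truncation handling are
 * assumptions for illustration, not kernel API:
 *
 *	static int example_recvmsg(struct sock *sk, struct msghdr *msg,
 *				   size_t len, int flags)
 *	{
 *		struct sk_buff *skb;
 *		size_t copied;
 *		int err;
 *
 *		// NULL with err == -EAGAIN means no data (nonblocking/timeout)
 *		skb = skb_recv_datagram(sk, flags, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = min_t(size_t, len, skb->len);
 *		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */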

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
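
/*
 * Example (illustrative sketch only): skb_kill_datagram() is the usual way
 * to drop a received packet whose checksum turns out to be bad, so that a
 * later recvmsg() does not see it again even if it was MSG_PEEK'ed. The
 * surrounding recvmsg-style context (hlen, msg, flags) is assumed here:
 *
 *	skb = skb_recv_datagram(sk, flags, &err);
 *	if (!skb)
 *		return err;
 *	if (skb_copy_and_csum_datagram_msg(skb, hlen, msg)) {
 *		// Bad checksum: unlink (if still queued) and free
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EAGAIN;
 *	}
 */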

INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
							     size_t bytes,
							     void *data __always_unused,
							     struct iov_iter *i));

static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
				    skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
					vaddr + skb_frag_off(frag) + offset - start,
					copy, data, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb.  We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}

/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *          and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
				   hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);

static size_t simple_copy_to_iter(const void *addr, size_t bytes,
				  void *data __always_unused, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
				   simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
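
/*
 * Example (illustrative note): callers rarely build an iov_iter by hand;
 * the common entry point is the skb_copy_datagram_msg() wrapper in
 * <linux/skbuff.h>, which simply forwards the msghdr's iterator:
 *
 *	static inline int skb_copy_datagram_msg(const struct sk_buff *skb,
 *						int offset, struct msghdr *msg,
 *						int size)
 *	{
 *		return skb_copy_datagram_iter(skb, offset, &msg->msg_iter, size);
 *	}
 *
 * A recvmsg() implementation can thus skip protocol headers by passing
 * their length as the offset, e.g. skb_copy_datagram_msg(skb, hlen, msg, copied).
 */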

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  skb_frag_off(frag) + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
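
/*
 * Example (illustrative sketch only): on the send path this is typically
 * used right after allocating and sizing a linear skb. The reserve value
 * and error handling below are simplified assumptions for illustration:
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, reserve);
 *	skb_put(skb, len);		// the bytes must exist before copying in
 *	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */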

int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *from, size_t length)
{
	int frag = skb_shinfo(skb)->nr_frags;

	while (length && iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		struct page *last_head = NULL;
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int refs, n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, length,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);
		length -= copied;

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		if (sk && sk->sk_type == SOCK_STREAM) {
			sk_wmem_queued_add(sk, truesize);
			if (!skb_zcopy_pure(skb))
				sk_mem_charge(sk, truesize);
		} else {
			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		}
		for (refs = 0; copied != 0; start = 0) {
			int size = min_t(int, copied, PAGE_SIZE - start);
			struct page *head = compound_head(pages[n]);

			start += (pages[n] - head) << PAGE_SHIFT;
			copied -= size;
			n++;
			if (frag) {
				skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];

				if (head == skb_frag_page(last) &&
				    start == skb_frag_off(last) + skb_frag_size(last)) {
					skb_frag_size_add(last, size);
					/* We combined this page, so we need to
					 * release a reference. Since a compound
					 * page's refcount is shared among its
					 * pages, batch the refcount adjustments
					 * to limit false sharing.
					 */
					last_head = head;
					refs++;
					continue;
				}
			}
			if (refs) {
				page_ref_sub(last_head, refs);
				refs = 0;
			}
			skb_fill_page_desc_noacc(skb, frag++, head, start, size);
		}
		if (refs)
			page_ref_sub(last_head, refs);
	}
	return 0;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
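
/*
 * Example (illustrative sketch only): zerocopy senders such as tun/tap
 * size the linear part to hold just the headers, then let this helper pin
 * the remaining user pages as frags instead of copying them. The
 * linear-size choice and error handling are assumptions for illustration:
 *
 *	// Keep only the headers linear; the rest becomes pinned frags.
 *	skb = sock_alloc_send_skb(sk, hdr_len + NET_SKB_PAD, noblock, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put(skb, hdr_len);
 *	err = zerocopy_sg_from_iter(skb, &msg->msg_iter);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */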

/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *          and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	struct csum_state csdata = { .csum = *csump };
	int ret;

	ret = __skb_datagram_iter(skb, offset, to, len, true,
				  csum_and_copy_to_iter, &csdata);
	if (ret)
		return ret;

	*csump = csdata.csum;
	return 0;
}

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: header length (the first @hlen bytes are checksummed but not
 *	       copied, e.g. sizeof(struct udphdr))
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
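
/*
 * Example (illustrative sketch only, modeled on a UDP-style receive): copy
 * and verify in one pass, falling back to a plain copy when the checksum
 * was already validated. The surrounding variables are assumed context:
 *
 *	if (skb_csum_unnecessary(skb))
 *		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
 *					    msg, copied);
 *	else
 *		err = skb_copy_and_csum_datagram_msg(skb,
 *						     sizeof(struct udphdr),
 *						     msg);
 *	if (err == -EINVAL) {
 *		// Checksum failed: drop the datagram so it isn't seen again
 *		skb_kill_datagram(sk, skb, flags);
 *	}
 */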

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for a protocol and you
 *	use a different write policy from sock_writeable(), then please
 *	supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
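
/*
 * Example (illustrative sketch only): a datagram protocol typically wires
 * datagram_poll straight into its proto_ops table. The family and the ops
 * besides .poll are hypothetical placeholders, not definitions from this
 * file:
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,	// hypothetical family
 *		.owner		= THIS_MODULE,
 *		.poll		= datagram_poll,
 *		.recvmsg	= example_recvmsg,
 *		.sendmsg	= example_sendmsg,
 *		// remaining ops elided
 *	};
 */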