// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

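/*
 * IORING_OP_SHUTDOWN: shutdown(2) over io_uring. Prep only checks that
 * the unused SQE fields are zero and stashes 'how'; the issue side never
 * runs nonblocking and instead bounces with -EAGAIN so it is retried
 * from a blocking (io-wq) context.
 */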
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

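/*
 * A short transfer is only worth retrying when MSG_WAITALL is set, and
 * only on socket types where a retry continues where the previous call
 * left off (stream/seqpacket); for datagram sockets a partial result is
 * final.
 */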
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

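/*
 * Async msghdrs are recycled through a per-ring cache (ctx->netmsg_cache)
 * to avoid allocator round trips on busy send/recv workloads. The cache
 * is only usable with the ring lock held, hence the IO_URING_F_UNLOCKED
 * checks below.
 */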
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		hdr = container_of(entry, struct io_async_msghdr, cache);
		hdr->free_iov = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

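/*
 * Copy the on-stack msghdr into persistent async data before returning
 * -EAGAIN, so the retry observes identical state. Pointers into the old
 * copy (msg_name and the inline fast_iov) must be redirected into the
 * new allocation.
 */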
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

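/*
 * For send/send-zc with a destination address, copy the sockaddr into
 * kernel memory at prep time so an async retry never has to touch user
 * memory again.
 */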
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			      struct sockaddr_storage *addr_storage,
			      unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

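/*
 * SQE layout shared by send and sendmsg: ->addr holds the user msghdr
 * (sendmsg) or the buffer (send), ->ioprio carries the IORING_RECVSEND_*
 * flags, and ->msg_flags the MSG_* flags, which always get MSG_NOSIGNAL
 * added. Only IORING_OP_SEND may carry a destination address in
 * ->addr2/->addr_len.
 */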
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

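/*
 * Issue side of sendmsg: use the cached header copy if the request went
 * async before, honour IORING_RECVSEND_POLL_FIRST on first issue, and on
 * a partial transfer record progress in ->done_io before going async so
 * the final CQE reports the total.
 */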
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

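/*
 * Non-msghdr send: the msghdr is built on stack and the single user
 * buffer imported directly. A destination address, if any, is copied to
 * async storage before any -EAGAIN so a retry doesn't re-read user
 * memory.
 */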
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

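/*
 * The multishot recvmsg header (io_uring_recvmsg_out + name + control)
 * sizes come from userspace; make sure their sum fits in an int without
 * overflowing.
 */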
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

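/*
 * Import the user msghdr. With provided buffers (REQ_F_BUFFER_SELECT) at
 * most one iovec is allowed, and only its length is used since the actual
 * buffer comes from the buffer ring at issue time; multishot additionally
 * records the name/control lengths for the header copied back to
 * userspace.
 */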
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

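/*
 * Multishot receive requires provided buffers and is incompatible with
 * MSG_WAITALL; for IORING_OP_RECV the length must be 0 as each retry
 * takes its size from the selected buffer.
 */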
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

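/*
 * Multishot recvmsg lays the result out in the selected buffer as:
 * struct io_uring_recvmsg_out, then the (possibly truncated) name, then
 * control data, then payload. Carve the header space out of the buffer
 * and point the msghdr fields at it.
 */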
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

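/*
 * Do the actual receive for multishot recvmsg, then copy the header and
 * the (possibly truncated) name back to the start of the selected
 * buffer. Returns the total number of buffer bytes consumed, header
 * included.
 */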
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *      "fromlen shall refer to the value before truncation.."
	 *                      1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

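/*
 * recvmsg issue path. For multishot, each pass selects a fresh provided
 * buffer and posts a completion with IORING_CQE_F_MORE through
 * io_recv_finish(); the retry loop continues while the socket keeps
 * returning data synchronously.
 */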
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

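/*
 * Zero-copy send cleanup. A zc request owns a notification request whose
 * CQE (IORING_CQE_F_NOTIF) tells userspace the kernel is done with the
 * buffer; if the send never completed normally, flush the notif here.
 */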
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF |
			  IORING_SEND_ZC_REPORT_USAGE))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}
	if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
		io_notif_to_data(notif)->zc_report = true;
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

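/*
 * ubuf_info ->sg_from_iter callbacks for zero-copy sends. The iovec
 * variant falls back to the generic copy-avoiding import; the bvec
 * variant (fixed buffers) fills skb frags straight from the registered
 * buffer's bvec array without taking page references, marking the skb
 * as having managed frags.
 */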
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

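/*
 * Issue side of IORING_OP_SEND_ZC. Fixed buffers are imported through
 * req->imu; plain buffers go through import_single_range() and must be
 * accounted against the notif's memory. msg_ubuf wires the skb completion
 * up to the notification.
 */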
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

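/*
 * ->fail handler for the send/recv variants: report partial progress
 * instead of the error where applicable, and keep IORING_CQE_F_MORE set
 * for zc requests that still owe a notification CQE.
 */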
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

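/*
 * accept4(2) over io_uring. ->addr/->addr2 carry the sockaddr and length
 * pointers, ->accept_flags the SOCK_* flags, and ->ioprio may request
 * IORING_ACCEPT_MULTISHOT. Multishot with a direct descriptor requires
 * IORING_FILE_INDEX_ALLOC, since a single fixed slot can't hold every
 * accepted file.
 */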
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
}

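/*
 * socket(2) over io_uring, with the syscall arguments packed into
 * otherwise unused SQE fields: domain in ->fd, type in ->off, protocol
 * in ->len. Like accept, the result can target a fixed file slot.
 */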
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

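/*
 * connect(2) over io_uring. Once a nonblocking attempt has returned
 * -EINPROGRESS, later issues of the same request only need to check
 * sock_error() for the final result rather than calling connect again.
 */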
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif