1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqe (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.dk/liburing
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35  * from data shared between the kernel and application. This is done both
36  * for ordering purposes, but also to ensure that once a value is loaded from
37  * data that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <net/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
50 
51 #include <linux/sched/signal.h>
52 #include <linux/fs.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/mm.h>
56 #include <linux/mman.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/bvec.h>
60 #include <linux/net.h>
61 #include <net/sock.h>
62 #include <net/af_unix.h>
63 #include <net/scm.h>
64 #include <linux/anon_inodes.h>
65 #include <linux/sched/mm.h>
66 #include <linux/uaccess.h>
67 #include <linux/nospec.h>
68 #include <linux/highmem.h>
69 #include <linux/fsnotify.h>
70 #include <linux/fadvise.h>
71 #include <linux/task_work.h>
72 #include <linux/io_uring.h>
73 #include <linux/audit.h>
74 #include <linux/security.h>
75 
76 #define CREATE_TRACE_POINTS
77 #include <trace/events/io_uring.h>
78 
79 #include <uapi/linux/io_uring.h>
80 
81 #include "io-wq.h"
82 
83 #include "io_uring.h"
84 #include "opdef.h"
85 #include "refs.h"
86 #include "tctx.h"
87 #include "sqpoll.h"
88 #include "fdinfo.h"
89 #include "kbuf.h"
90 #include "rsrc.h"
91 #include "cancel.h"
92 #include "net.h"
93 #include "notif.h"
94 
95 #include "timeout.h"
96 #include "poll.h"
97 #include "alloc_cache.h"
98 
99 #define IORING_MAX_ENTRIES	32768
100 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
101 
102 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
103 				 IORING_REGISTER_LAST + IORING_OP_LAST)
104 
105 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
106 			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)
107 
108 #define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
109 			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
110 
111 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
112 				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
113 				REQ_F_ASYNC_DATA)
114 
115 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
116 				 IO_REQ_CLEAN_FLAGS)
117 
118 #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
119 
120 #define IO_COMPL_BATCH			32
121 #define IO_REQ_ALLOC_BATCH		8
122 
123 enum {
124 	IO_CHECK_CQ_OVERFLOW_BIT,
125 	IO_CHECK_CQ_DROPPED_BIT,
126 };
127 
128 enum {
129 	IO_EVENTFD_OP_SIGNAL_BIT,
130 	IO_EVENTFD_OP_FREE_BIT,
131 };
132 
133 struct io_defer_entry {
134 	struct list_head	list;
135 	struct io_kiocb		*req;
136 	u32			seq;
137 };
138 
139 /* requests with any of those set should undergo io_disarm_next() */
140 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
141 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
142 
143 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
144 					 struct task_struct *task,
145 					 bool cancel_all);
146 
147 static void io_dismantle_req(struct io_kiocb *req);
148 static void io_clean_op(struct io_kiocb *req);
149 static void io_queue_sqe(struct io_kiocb *req);
150 static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
151 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
152 
153 static struct kmem_cache *req_cachep;
154 
155 struct sock *io_uring_get_socket(struct file *file)
156 {
157 #if defined(CONFIG_UNIX)
158 	if (io_is_uring_fops(file)) {
159 		struct io_ring_ctx *ctx = file->private_data;
160 
161 		return ctx->ring_sock->sk;
162 	}
163 #endif
164 	return NULL;
165 }
166 EXPORT_SYMBOL(io_uring_get_socket);
167 
168 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
169 {
170 	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
171 		__io_submit_flush_completions(ctx);
172 }
173 
174 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
175 {
176 	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
177 }
178 
179 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
180 {
181 	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
182 }
183 
184 static bool io_match_linked(struct io_kiocb *head)
185 {
186 	struct io_kiocb *req;
187 
188 	io_for_each_link(req, head) {
189 		if (req->flags & REQ_F_INFLIGHT)
190 			return true;
191 	}
192 	return false;
193 }
194 
195 /*
196  * As io_match_task() but protected against racing with linked timeouts.
197  * User must not hold timeout_lock.
198  */
199 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
200 			bool cancel_all)
201 {
202 	bool matched;
203 
204 	if (task && head->task != task)
205 		return false;
206 	if (cancel_all)
207 		return true;
208 
209 	if (head->flags & REQ_F_LINK_TIMEOUT) {
210 		struct io_ring_ctx *ctx = head->ctx;
211 
212 		/* protect against races with linked timeouts */
213 		spin_lock_irq(&ctx->timeout_lock);
214 		matched = io_match_linked(head);
215 		spin_unlock_irq(&ctx->timeout_lock);
216 	} else {
217 		matched = io_match_linked(head);
218 	}
219 	return matched;
220 }
221 
222 static inline void req_fail_link_node(struct io_kiocb *req, int res)
223 {
224 	req_set_fail(req);
225 	io_req_set_res(req, res, 0);
226 }
227 
228 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
229 {
230 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
231 }
232 
233 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
234 {
235 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
236 
237 	complete(&ctx->ref_comp);
238 }
239 
240 static __cold void io_fallback_req_func(struct work_struct *work)
241 {
242 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
243 						fallback_work.work);
244 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
245 	struct io_kiocb *req, *tmp;
246 	bool locked = false;
247 
248 	percpu_ref_get(&ctx->refs);
249 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
250 		req->io_task_work.func(req, &locked);
251 
252 	if (locked) {
253 		io_submit_flush_completions(ctx);
254 		mutex_unlock(&ctx->uring_lock);
255 	}
256 	percpu_ref_put(&ctx->refs);
257 }
258 
259 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
260 {
261 	unsigned hash_buckets = 1U << bits;
262 	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
263 
264 	table->hbs = kmalloc(hash_size, GFP_KERNEL);
265 	if (!table->hbs)
266 		return -ENOMEM;
267 
268 	table->hash_bits = bits;
269 	init_hash_table(table, hash_buckets);
270 	return 0;
271 }
272 
273 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
274 {
275 	struct io_ring_ctx *ctx;
276 	int hash_bits;
277 
278 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
279 	if (!ctx)
280 		return NULL;
281 
282 	xa_init(&ctx->io_bl_xa);
283 
284 	/*
285 	 * Use 5 bits less than the max cq entries; that should give us around
286 	 * 32 entries per hash list if totally full and uniformly spread, but
287 	 * don't keep too many buckets so as not to over-consume memory.
288 	 */
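	/*
	 * Worked example (the arithmetic follows directly from the lines
	 * below): an application setting up cq_entries == 4096 gets
	 * ilog2(4096) - 5 == 7, i.e. 128 cancel-hash buckets holding
	 * roughly 32 entries each when completely full.
	 */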
289 	hash_bits = ilog2(p->cq_entries) - 5;
290 	hash_bits = clamp(hash_bits, 1, 8);
291 	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
292 		goto err;
293 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
294 		goto err;
295 
296 	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
297 	if (!ctx->dummy_ubuf)
298 		goto err;
299 	/* set invalid range, so io_import_fixed() fails meeting it */
300 	ctx->dummy_ubuf->ubuf = -1UL;
301 
302 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
303 			    0, GFP_KERNEL))
304 		goto err;
305 
306 	ctx->flags = p->flags;
307 	init_waitqueue_head(&ctx->sqo_sq_wait);
308 	INIT_LIST_HEAD(&ctx->sqd_list);
309 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
310 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
311 	io_alloc_cache_init(&ctx->apoll_cache);
312 	io_alloc_cache_init(&ctx->netmsg_cache);
313 	init_completion(&ctx->ref_comp);
314 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
315 	mutex_init(&ctx->uring_lock);
316 	init_waitqueue_head(&ctx->cq_wait);
317 	spin_lock_init(&ctx->completion_lock);
318 	spin_lock_init(&ctx->timeout_lock);
319 	INIT_WQ_LIST(&ctx->iopoll_list);
320 	INIT_LIST_HEAD(&ctx->io_buffers_pages);
321 	INIT_LIST_HEAD(&ctx->io_buffers_comp);
322 	INIT_LIST_HEAD(&ctx->defer_list);
323 	INIT_LIST_HEAD(&ctx->timeout_list);
324 	INIT_LIST_HEAD(&ctx->ltimeout_list);
325 	spin_lock_init(&ctx->rsrc_ref_lock);
326 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
327 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
328 	init_llist_head(&ctx->rsrc_put_llist);
329 	init_llist_head(&ctx->work_llist);
330 	INIT_LIST_HEAD(&ctx->tctx_list);
331 	ctx->submit_state.free_list.next = NULL;
332 	INIT_WQ_LIST(&ctx->locked_free_list);
333 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
334 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
335 	return ctx;
336 err:
337 	kfree(ctx->dummy_ubuf);
338 	kfree(ctx->cancel_table.hbs);
339 	kfree(ctx->cancel_table_locked.hbs);
340 	kfree(ctx->io_bl);
341 	xa_destroy(&ctx->io_bl_xa);
342 	kfree(ctx);
343 	return NULL;
344 }
345 
346 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
347 {
348 	struct io_rings *r = ctx->rings;
349 
350 	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
351 	ctx->cq_extra--;
352 }
353 
354 static bool req_need_defer(struct io_kiocb *req, u32 seq)
355 {
356 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
357 		struct io_ring_ctx *ctx = req->ctx;
358 
359 		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
360 	}
361 
362 	return false;
363 }
364 
365 static inline void io_req_track_inflight(struct io_kiocb *req)
366 {
367 	if (!(req->flags & REQ_F_INFLIGHT)) {
368 		req->flags |= REQ_F_INFLIGHT;
369 		atomic_inc(&req->task->io_uring->inflight_tracked);
370 	}
371 }
372 
373 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
374 {
375 	if (WARN_ON_ONCE(!req->link))
376 		return NULL;
377 
378 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
379 	req->flags |= REQ_F_LINK_TIMEOUT;
380 
381 	/* linked timeouts should have two refs once prep'ed */
382 	io_req_set_refcount(req);
383 	__io_req_set_refcount(req->link, 2);
384 	return req->link;
385 }
386 
387 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
388 {
389 	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
390 		return NULL;
391 	return __io_prep_linked_timeout(req);
392 }
393 
394 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
395 {
396 	io_queue_linked_timeout(__io_prep_linked_timeout(req));
397 }
398 
399 static inline void io_arm_ltimeout(struct io_kiocb *req)
400 {
401 	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
402 		__io_arm_ltimeout(req);
403 }
404 
405 static void io_prep_async_work(struct io_kiocb *req)
406 {
407 	const struct io_op_def *def = &io_op_defs[req->opcode];
408 	struct io_ring_ctx *ctx = req->ctx;
409 
410 	if (!(req->flags & REQ_F_CREDS)) {
411 		req->flags |= REQ_F_CREDS;
412 		req->creds = get_current_cred();
413 	}
414 
415 	req->work.list.next = NULL;
416 	req->work.flags = 0;
417 	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
418 	if (req->flags & REQ_F_FORCE_ASYNC)
419 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
420 
421 	if (req->file && !io_req_ffs_set(req))
422 		req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
423 
424 	if (req->flags & REQ_F_ISREG) {
425 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
426 			io_wq_hash_work(&req->work, file_inode(req->file));
427 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
428 		if (def->unbound_nonreg_file)
429 			req->work.flags |= IO_WQ_WORK_UNBOUND;
430 	}
431 }
432 
433 static void io_prep_async_link(struct io_kiocb *req)
434 {
435 	struct io_kiocb *cur;
436 
437 	if (req->flags & REQ_F_LINK_TIMEOUT) {
438 		struct io_ring_ctx *ctx = req->ctx;
439 
440 		spin_lock_irq(&ctx->timeout_lock);
441 		io_for_each_link(cur, req)
442 			io_prep_async_work(cur);
443 		spin_unlock_irq(&ctx->timeout_lock);
444 	} else {
445 		io_for_each_link(cur, req)
446 			io_prep_async_work(cur);
447 	}
448 }
449 
450 void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
451 {
452 	struct io_kiocb *link = io_prep_linked_timeout(req);
453 	struct io_uring_task *tctx = req->task->io_uring;
454 
455 	BUG_ON(!tctx);
456 	BUG_ON(!tctx->io_wq);
457 
458 	/* init ->work of the whole link before punting */
459 	io_prep_async_link(req);
460 
461 	/*
462 	 * Not expected to happen, but if we do have a bug where this _can_
463 	 * happen, catch it here and ensure the request is marked as
464 	 * canceled. That will make io-wq go through the usual work cancel
465 	 * procedure rather than attempt to run this request (or create a new
466 	 * worker for it).
467 	 */
468 	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
469 		req->work.flags |= IO_WQ_WORK_CANCEL;
470 
471 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
472 	io_wq_enqueue(tctx->io_wq, &req->work);
473 	if (link)
474 		io_queue_linked_timeout(link);
475 }
476 
477 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
478 {
479 	while (!list_empty(&ctx->defer_list)) {
480 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
481 						struct io_defer_entry, list);
482 
483 		if (req_need_defer(de->req, de->seq))
484 			break;
485 		list_del_init(&de->list);
486 		io_req_task_queue(de->req);
487 		kfree(de);
488 	}
489 }
490 
491 
492 static void io_eventfd_ops(struct rcu_head *rcu)
493 {
494 	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
495 	int ops = atomic_xchg(&ev_fd->ops, 0);
496 
497 	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
498 		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
499 
500 	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
501 	 * ordering in a race but if references are 0 we know we have to free
502 	 * it regardless.
503 	 */
504 	if (atomic_dec_and_test(&ev_fd->refs)) {
505 		eventfd_ctx_put(ev_fd->cq_ev_fd);
506 		kfree(ev_fd);
507 	}
508 }
509 
510 static void io_eventfd_signal(struct io_ring_ctx *ctx)
511 {
512 	struct io_ev_fd *ev_fd = NULL;
513 
514 	rcu_read_lock();
515 	/*
516 	 * rcu_dereference ctx->io_ev_fd once and use it for both the check
517 	 * and the eventfd_signal call
518 	 */
519 	ev_fd = rcu_dereference(ctx->io_ev_fd);
520 
521 	/*
522 	 * Check again if ev_fd exists in case an io_eventfd_unregister call
523 	 * completed between the NULL check of ctx->io_ev_fd at the start of
524 	 * the function and rcu_read_lock.
525 	 */
526 	if (unlikely(!ev_fd))
527 		goto out;
528 	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
529 		goto out;
530 	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
531 		goto out;
532 
533 	if (likely(eventfd_signal_allowed())) {
534 		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
535 	} else {
536 		atomic_inc(&ev_fd->refs);
537 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
538 			call_rcu(&ev_fd->rcu, io_eventfd_ops);
539 		else
540 			atomic_dec(&ev_fd->refs);
541 	}
542 
543 out:
544 	rcu_read_unlock();
545 }
546 
547 static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
548 {
549 	bool skip;
550 
551 	spin_lock(&ctx->completion_lock);
552 
553 	/*
554 	 * Eventfd should only get triggered when at least one event has been
555 	 * posted. Some applications rely on the eventfd notification count
556 	 * only changing IFF a new CQE has been added to the CQ ring. There's
557 	 * no dependency on a 1:1 relationship between how many times this
558 	 * function is called (and hence the eventfd count) and number of CQEs
559 	 * posted to the CQ ring.
560 	 */
561 	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
562 	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
563 	spin_unlock(&ctx->completion_lock);
564 	if (skip)
565 		return;
566 
567 	io_eventfd_signal(ctx);
568 }
569 
570 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
571 {
572 	if (ctx->off_timeout_used || ctx->drain_active) {
573 		spin_lock(&ctx->completion_lock);
574 		if (ctx->off_timeout_used)
575 			io_flush_timeouts(ctx);
576 		if (ctx->drain_active)
577 			io_queue_deferred(ctx);
578 		spin_unlock(&ctx->completion_lock);
579 	}
580 	if (ctx->has_evfd)
581 		io_eventfd_flush_signal(ctx);
582 }
583 
584 static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
585 {
586 	io_commit_cqring_flush(ctx);
587 	io_cqring_wake(ctx);
588 }
589 
590 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
591 	__releases(ctx->completion_lock)
592 {
593 	io_commit_cqring(ctx);
594 	spin_unlock(&ctx->completion_lock);
595 	io_cqring_ev_posted(ctx);
596 }
597 
598 void io_cq_unlock_post(struct io_ring_ctx *ctx)
599 {
600 	__io_cq_unlock_post(ctx);
601 }
602 
603 /* Returns true if there are no backlogged entries after the flush */
604 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
605 {
606 	bool all_flushed;
607 	size_t cqe_size = sizeof(struct io_uring_cqe);
608 
609 	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
610 		return false;
611 
612 	if (ctx->flags & IORING_SETUP_CQE32)
613 		cqe_size <<= 1;
614 
615 	io_cq_lock(ctx);
616 	while (!list_empty(&ctx->cq_overflow_list)) {
617 		struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
618 		struct io_overflow_cqe *ocqe;
619 
620 		if (!cqe && !force)
621 			break;
622 		ocqe = list_first_entry(&ctx->cq_overflow_list,
623 					struct io_overflow_cqe, list);
624 		if (cqe)
625 			memcpy(cqe, &ocqe->cqe, cqe_size);
626 		else
627 			io_account_cq_overflow(ctx);
628 
629 		list_del(&ocqe->list);
630 		kfree(ocqe);
631 	}
632 
633 	all_flushed = list_empty(&ctx->cq_overflow_list);
634 	if (all_flushed) {
635 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
636 		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
637 	}
638 
639 	io_cq_unlock_post(ctx);
640 	return all_flushed;
641 }
642 
643 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
644 {
645 	bool ret = true;
646 
647 	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
648 		/* iopoll syncs against uring_lock, not completion_lock */
649 		if (ctx->flags & IORING_SETUP_IOPOLL)
650 			mutex_lock(&ctx->uring_lock);
651 		ret = __io_cqring_overflow_flush(ctx, false);
652 		if (ctx->flags & IORING_SETUP_IOPOLL)
653 			mutex_unlock(&ctx->uring_lock);
654 	}
655 
656 	return ret;
657 }
658 
659 void __io_put_task(struct task_struct *task, int nr)
660 {
661 	struct io_uring_task *tctx = task->io_uring;
662 
663 	percpu_counter_sub(&tctx->inflight, nr);
664 	if (unlikely(atomic_read(&tctx->in_idle)))
665 		wake_up(&tctx->wait);
666 	put_task_struct_many(task, nr);
667 }
668 
669 void io_task_refs_refill(struct io_uring_task *tctx)
670 {
671 	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
672 
673 	percpu_counter_add(&tctx->inflight, refill);
674 	refcount_add(refill, &current->usage);
675 	tctx->cached_refs += refill;
676 }
677 
678 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
679 {
680 	struct io_uring_task *tctx = task->io_uring;
681 	unsigned int refs = tctx->cached_refs;
682 
683 	if (refs) {
684 		tctx->cached_refs = 0;
685 		percpu_counter_sub(&tctx->inflight, refs);
686 		put_task_struct_many(task, refs);
687 	}
688 }
689 
690 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
691 				     s32 res, u32 cflags, u64 extra1, u64 extra2)
692 {
693 	struct io_overflow_cqe *ocqe;
694 	size_t ocq_size = sizeof(struct io_overflow_cqe);
695 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
696 
697 	if (is_cqe32)
698 		ocq_size += sizeof(struct io_uring_cqe);
699 
700 	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
701 	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
702 	if (!ocqe) {
703 		/*
704 		 * If we're in ring overflow flush mode, or in task cancel mode,
705 		 * or cannot allocate an overflow entry, then we need to drop it
706 		 * on the floor.
707 		 */
708 		io_account_cq_overflow(ctx);
709 		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
710 		return false;
711 	}
712 	if (list_empty(&ctx->cq_overflow_list)) {
713 		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
714 		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
715 
716 	}
717 	ocqe->cqe.user_data = user_data;
718 	ocqe->cqe.res = res;
719 	ocqe->cqe.flags = cflags;
720 	if (is_cqe32) {
721 		ocqe->cqe.big_cqe[0] = extra1;
722 		ocqe->cqe.big_cqe[1] = extra2;
723 	}
724 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
725 	return true;
726 }
727 
728 bool io_req_cqe_overflow(struct io_kiocb *req)
729 {
730 	if (!(req->flags & REQ_F_CQE32_INIT)) {
731 		req->extra1 = 0;
732 		req->extra2 = 0;
733 	}
734 	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
735 					req->cqe.res, req->cqe.flags,
736 					req->extra1, req->extra2);
737 }
738 
739 /*
740  * writes to the cq entry need to come after reading head; the
741  * control dependency is enough as we're using WRITE_ONCE to
742  * fill the cq entry
743  */
744 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
745 {
746 	struct io_rings *rings = ctx->rings;
747 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
748 	unsigned int free, queued, len;
749 
750 	/*
751 	 * Posting into the CQ when there are pending overflowed CQEs may break
752 	 * ordering guarantees, which will affect links, F_MORE users and more.
753 	 * Force overflow the completion.
754 	 */
755 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
756 		return NULL;
757 
758 	/* userspace may cheat by modifying the tail, be safe and do min */
759 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
760 	free = ctx->cq_entries - queued;
761 	/* we need a contiguous range, limit based on the current array offset */
762 	len = min(free, ctx->cq_entries - off);
763 	if (!len)
764 		return NULL;
765 
766 	if (ctx->flags & IORING_SETUP_CQE32) {
767 		off <<= 1;
768 		len <<= 1;
769 	}
770 
771 	ctx->cqe_cached = &rings->cqes[off];
772 	ctx->cqe_sentinel = ctx->cqe_cached + len;
773 
774 	ctx->cached_cq_tail++;
775 	ctx->cqe_cached++;
776 	if (ctx->flags & IORING_SETUP_CQE32)
777 		ctx->cqe_cached++;
778 	return &rings->cqes[off];
779 }
780 
781 bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
782 		     bool allow_overflow)
783 {
784 	struct io_uring_cqe *cqe;
785 
786 	ctx->cq_extra++;
787 
788 	/*
789 	 * If we can't get a cq entry, userspace overflowed the
790 	 * submission (by quite a lot). Increment the overflow count in
791 	 * the ring.
792 	 */
793 	cqe = io_get_cqe(ctx);
794 	if (likely(cqe)) {
795 		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
796 
797 		WRITE_ONCE(cqe->user_data, user_data);
798 		WRITE_ONCE(cqe->res, res);
799 		WRITE_ONCE(cqe->flags, cflags);
800 
801 		if (ctx->flags & IORING_SETUP_CQE32) {
802 			WRITE_ONCE(cqe->big_cqe[0], 0);
803 			WRITE_ONCE(cqe->big_cqe[1], 0);
804 		}
805 		return true;
806 	}
807 
808 	if (allow_overflow)
809 		return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
810 
811 	return false;
812 }
813 
814 bool io_post_aux_cqe(struct io_ring_ctx *ctx,
815 		     u64 user_data, s32 res, u32 cflags,
816 		     bool allow_overflow)
817 {
818 	bool filled;
819 
820 	io_cq_lock(ctx);
821 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags, allow_overflow);
822 	io_cq_unlock_post(ctx);
823 	return filled;
824 }
825 
826 void io_req_complete_post(struct io_kiocb *req)
827 {
828 	struct io_ring_ctx *ctx = req->ctx;
829 
830 	io_cq_lock(ctx);
831 	if (!(req->flags & REQ_F_CQE_SKIP))
832 		__io_fill_cqe_req(ctx, req);
833 
834 	/*
835 	 * If we're the last reference to this request, add to our locked
836 	 * free_list cache.
837 	 */
838 	if (req_ref_put_and_test(req)) {
839 		if (req->flags & IO_REQ_LINK_FLAGS) {
840 			if (req->flags & IO_DISARM_MASK)
841 				io_disarm_next(req);
842 			if (req->link) {
843 				io_req_task_queue(req->link);
844 				req->link = NULL;
845 			}
846 		}
847 		io_req_put_rsrc(req);
848 		/*
849 		 * Selected buffer deallocation in io_clean_op() assumes that
850 		 * we don't hold ->completion_lock. Clean them here to avoid
851 		 * deadlocks.
852 		 */
853 		io_put_kbuf_comp(req);
854 		io_dismantle_req(req);
855 		io_put_task(req->task, 1);
856 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
857 		ctx->locked_free_nr++;
858 	}
859 	io_cq_unlock_post(ctx);
860 }
861 
862 inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
863 {
864 	io_req_complete_post(req);
865 }
866 
867 void io_req_complete_failed(struct io_kiocb *req, s32 res)
868 	__must_hold(&ctx->uring_lock)
869 {
870 	const struct io_op_def *def = &io_op_defs[req->opcode];
871 
872 	lockdep_assert_held(&req->ctx->uring_lock);
873 
874 	req_set_fail(req);
875 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
876 	if (def->fail)
877 		def->fail(req);
878 	io_req_complete_post(req);
879 }
880 
881 /*
882  * Don't initialise the fields below on every allocation, but do that in
883  * advance and keep them valid across allocations.
884  */
885 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
886 {
887 	req->ctx = ctx;
888 	req->link = NULL;
889 	req->async_data = NULL;
890 	/* not necessary, but safer to zero */
891 	req->cqe.res = 0;
892 }
893 
894 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
895 					struct io_submit_state *state)
896 {
897 	spin_lock(&ctx->completion_lock);
898 	wq_list_splice(&ctx->locked_free_list, &state->free_list);
899 	ctx->locked_free_nr = 0;
900 	spin_unlock(&ctx->completion_lock);
901 }
902 
903 /*
904  * A request might get retired back into the request caches even before opcode
905  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
906  * Because of that, io_alloc_req() should be called only under ->uring_lock
907  * and with extra caution to not get a request that is still worked on.
908  */
909 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
910 	__must_hold(&ctx->uring_lock)
911 {
912 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
913 	void *reqs[IO_REQ_ALLOC_BATCH];
914 	int ret, i;
915 
916 	/*
917 	 * If we have more than a batch's worth of requests in our IRQ side
918 	 * locked cache, grab the lock and move them over to our submission
919 	 * side cache.
920 	 */
921 	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
922 		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
923 		if (!io_req_cache_empty(ctx))
924 			return true;
925 	}
926 
927 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
928 
929 	/*
930 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
931 	 * retry single alloc to be on the safe side.
932 	 */
933 	if (unlikely(ret <= 0)) {
934 		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
935 		if (!reqs[0])
936 			return false;
937 		ret = 1;
938 	}
939 
940 	percpu_ref_get_many(&ctx->refs, ret);
941 	for (i = 0; i < ret; i++) {
942 		struct io_kiocb *req = reqs[i];
943 
944 		io_preinit_req(req, ctx);
945 		io_req_add_to_cache(req, ctx);
946 	}
947 	return true;
948 }
949 
950 static inline void io_dismantle_req(struct io_kiocb *req)
951 {
952 	unsigned int flags = req->flags;
953 
954 	if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
955 		io_clean_op(req);
956 	if (!(flags & REQ_F_FIXED_FILE))
957 		io_put_file(req->file);
958 }
959 
960 __cold void io_free_req(struct io_kiocb *req)
961 {
962 	struct io_ring_ctx *ctx = req->ctx;
963 
964 	io_req_put_rsrc(req);
965 	io_dismantle_req(req);
966 	io_put_task(req->task, 1);
967 
968 	spin_lock(&ctx->completion_lock);
969 	wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
970 	ctx->locked_free_nr++;
971 	spin_unlock(&ctx->completion_lock);
972 }
973 
974 static void __io_req_find_next_prep(struct io_kiocb *req)
975 {
976 	struct io_ring_ctx *ctx = req->ctx;
977 
978 	io_cq_lock(ctx);
979 	io_disarm_next(req);
980 	io_cq_unlock_post(ctx);
981 }
982 
983 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
984 {
985 	struct io_kiocb *nxt;
986 
987 	/*
988 	 * If LINK is set, we have dependent requests in this chain. If we
989 	 * didn't fail this request, queue the first one up, moving any other
990 	 * dependencies to the next request. In case of failure, fail the rest
991 	 * of the chain.
992 	 */
993 	if (unlikely(req->flags & IO_DISARM_MASK))
994 		__io_req_find_next_prep(req);
995 	nxt = req->link;
996 	req->link = NULL;
997 	return nxt;
998 }
999 
1000 static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
1001 {
1002 	if (!ctx)
1003 		return;
1004 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1005 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1006 	if (*locked) {
1007 		io_submit_flush_completions(ctx);
1008 		mutex_unlock(&ctx->uring_lock);
1009 		*locked = false;
1010 	}
1011 	percpu_ref_put(&ctx->refs);
1012 }
1013 
1014 static unsigned int handle_tw_list(struct llist_node *node,
1015 				   struct io_ring_ctx **ctx, bool *locked,
1016 				   struct llist_node *last)
1017 {
1018 	unsigned int count = 0;
1019 
1020 	while (node != last) {
1021 		struct llist_node *next = node->next;
1022 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1023 						    io_task_work.node);
1024 
1025 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1026 
1027 		if (req->ctx != *ctx) {
1028 			ctx_flush_and_put(*ctx, locked);
1029 			*ctx = req->ctx;
1030 			/* if not contended, grab and improve batching */
1031 			*locked = mutex_trylock(&(*ctx)->uring_lock);
1032 			percpu_ref_get(&(*ctx)->refs);
1033 		}
1034 		req->io_task_work.func(req, locked);
1035 		node = next;
1036 		count++;
1037 	}
1038 
1039 	return count;
1040 }
1041 
1042 /**
1043  * io_llist_xchg - swap all entries in a lock-less list
1044  * @head:	the head of lock-less list to delete all entries
1045  * @new:	new entry as the head of the list
1046  *
1047  * If the list is empty, return NULL; otherwise, return a pointer to the first entry.
1048  * The order of entries returned is from the newest to the oldest added one.
1049  */
1050 static inline struct llist_node *io_llist_xchg(struct llist_head *head,
1051 					       struct llist_node *new)
1052 {
1053 	return xchg(&head->first, new);
1054 }
1055 
1056 /**
1057  * io_llist_cmpxchg - possibly swap all entries in a lock-less list
1058  * @head:	the head of lock-less list to delete all entries
1059  * @old:	expected old value of the first entry of the list
1060  * @new:	new entry as the head of the list
1061  *
1062  * perform a cmpxchg on the first entry of the list.
1063  */
1064 
1065 static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
1066 						  struct llist_node *old,
1067 						  struct llist_node *new)
1068 {
1069 	return cmpxchg(&head->first, old, new);
1070 }
1071 
1072 void tctx_task_work(struct callback_head *cb)
1073 {
1074 	bool uring_locked = false;
1075 	struct io_ring_ctx *ctx = NULL;
1076 	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1077 						  task_work);
1078 	struct llist_node fake = {};
1079 	struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake);
1080 	unsigned int loops = 1;
1081 	unsigned int count = handle_tw_list(node, &ctx, &uring_locked, NULL);
1082 
1083 	node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1084 	while (node != &fake) {
1085 		loops++;
1086 		node = io_llist_xchg(&tctx->task_list, &fake);
1087 		count += handle_tw_list(node, &ctx, &uring_locked, &fake);
1088 		node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1089 	}
1090 
1091 	ctx_flush_and_put(ctx, &uring_locked);
1092 
1093 	/* relaxed read is enough as only the task itself sets ->in_idle */
1094 	if (unlikely(atomic_read(&tctx->in_idle)))
1095 		io_uring_drop_tctx_refs(current);
1096 
1097 	trace_io_uring_task_work_run(tctx, count, loops);
1098 }
1099 
1100 static void io_req_local_work_add(struct io_kiocb *req)
1101 {
1102 	struct io_ring_ctx *ctx = req->ctx;
1103 
1104 	percpu_ref_get(&ctx->refs);
1105 
1106 	if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
1107 		percpu_ref_put(&ctx->refs);
1108 		return;
1109 	}
1110 	/* need it for the following io_cqring_wake() */
1111 	smp_mb__after_atomic();
1112 
1113 	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
1114 		io_move_task_work_from_local(ctx);
1115 		percpu_ref_put(&ctx->refs);
1116 		return;
1117 	}
1118 
1119 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1120 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1121 
1122 	if (ctx->has_evfd)
1123 		io_eventfd_signal(ctx);
1124 	__io_cqring_wake(ctx);
1125 	percpu_ref_put(&ctx->refs);
1126 }
1127 
1128 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
1129 {
1130 	struct io_uring_task *tctx = req->task->io_uring;
1131 	struct io_ring_ctx *ctx = req->ctx;
1132 	struct llist_node *node;
1133 
1134 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
1135 		io_req_local_work_add(req);
1136 		return;
1137 	}
1138 
1139 	/* task_work already pending, we're done */
1140 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1141 		return;
1142 
1143 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1144 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1145 
1146 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1147 		return;
1148 
1149 	node = llist_del_all(&tctx->task_list);
1150 
1151 	while (node) {
1152 		req = container_of(node, struct io_kiocb, io_task_work.node);
1153 		node = node->next;
1154 		if (llist_add(&req->io_task_work.node,
1155 			      &req->ctx->fallback_llist))
1156 			schedule_delayed_work(&req->ctx->fallback_work, 1);
1157 	}
1158 }
1159 
1160 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1161 {
1162 	struct llist_node *node;
1163 
1164 	node = llist_del_all(&ctx->work_llist);
1165 	while (node) {
1166 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1167 						    io_task_work.node);
1168 
1169 		node = node->next;
1170 		__io_req_task_work_add(req, false);
1171 	}
1172 }
1173 
1174 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
1175 {
1176 	struct llist_node *node;
1177 	struct llist_node fake;
1178 	struct llist_node *current_final = NULL;
1179 	int ret;
1180 	unsigned int loops = 1;
1181 
1182 	if (unlikely(ctx->submitter_task != current))
1183 		return -EEXIST;
1184 
1185 	node = io_llist_xchg(&ctx->work_llist, &fake);
1186 	ret = 0;
1187 again:
1188 	while (node != current_final) {
1189 		struct llist_node *next = node->next;
1190 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1191 						    io_task_work.node);
1192 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1193 		req->io_task_work.func(req, locked);
1194 		ret++;
1195 		node = next;
1196 	}
1197 
1198 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1199 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1200 
1201 	node = io_llist_cmpxchg(&ctx->work_llist, &fake, NULL);
1202 	if (node != &fake) {
1203 		loops++;
1204 		current_final = &fake;
1205 		node = io_llist_xchg(&ctx->work_llist, &fake);
1206 		goto again;
1207 	}
1208 
1209 	if (*locked)
1210 		io_submit_flush_completions(ctx);
1211 	trace_io_uring_local_work_run(ctx, ret, loops);
1212 	return ret;
1213 
1214 }
1215 
1216 int io_run_local_work(struct io_ring_ctx *ctx)
1217 {
1218 	bool locked;
1219 	int ret;
1220 
1221 	if (llist_empty(&ctx->work_llist))
1222 		return 0;
1223 
1224 	__set_current_state(TASK_RUNNING);
1225 	locked = mutex_trylock(&ctx->uring_lock);
1226 	ret = __io_run_local_work(ctx, &locked);
1227 	if (locked)
1228 		mutex_unlock(&ctx->uring_lock);
1229 
1230 	return ret;
1231 }
1232 
1233 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
1234 {
1235 	/* not needed for normal modes, but SQPOLL depends on it */
1236 	io_tw_lock(req->ctx, locked);
1237 	io_req_complete_failed(req, req->cqe.res);
1238 }
1239 
1240 void io_req_task_submit(struct io_kiocb *req, bool *locked)
1241 {
1242 	io_tw_lock(req->ctx, locked);
1243 	/* req->task == current here, checking PF_EXITING is safe */
1244 	if (likely(!(req->task->flags & PF_EXITING)))
1245 		io_queue_sqe(req);
1246 	else
1247 		io_req_complete_failed(req, -EFAULT);
1248 }
1249 
1250 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1251 {
1252 	io_req_set_res(req, ret, 0);
1253 	req->io_task_work.func = io_req_task_cancel;
1254 	io_req_task_work_add(req);
1255 }
1256 
1257 void io_req_task_queue(struct io_kiocb *req)
1258 {
1259 	req->io_task_work.func = io_req_task_submit;
1260 	io_req_task_work_add(req);
1261 }
1262 
1263 void io_queue_next(struct io_kiocb *req)
1264 {
1265 	struct io_kiocb *nxt = io_req_find_next(req);
1266 
1267 	if (nxt)
1268 		io_req_task_queue(nxt);
1269 }
1270 
1271 void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
1272 	__must_hold(&ctx->uring_lock)
1273 {
1274 	struct task_struct *task = NULL;
1275 	int task_refs = 0;
1276 
1277 	do {
1278 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1279 						    comp_list);
1280 
1281 		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1282 			if (req->flags & REQ_F_REFCOUNT) {
1283 				node = req->comp_list.next;
1284 				if (!req_ref_put_and_test(req))
1285 					continue;
1286 			}
1287 			if ((req->flags & REQ_F_POLLED) && req->apoll) {
1288 				struct async_poll *apoll = req->apoll;
1289 
1290 				if (apoll->double_poll)
1291 					kfree(apoll->double_poll);
1292 				if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
1293 					kfree(apoll);
1294 				req->flags &= ~REQ_F_POLLED;
1295 			}
1296 			if (req->flags & IO_REQ_LINK_FLAGS)
1297 				io_queue_next(req);
1298 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1299 				io_clean_op(req);
1300 		}
1301 		if (!(req->flags & REQ_F_FIXED_FILE))
1302 			io_put_file(req->file);
1303 
1304 		io_req_put_rsrc_locked(req, ctx);
1305 
1306 		if (req->task != task) {
1307 			if (task)
1308 				io_put_task(task, task_refs);
1309 			task = req->task;
1310 			task_refs = 0;
1311 		}
1312 		task_refs++;
1313 		node = req->comp_list.next;
1314 		io_req_add_to_cache(req, ctx);
1315 	} while (node);
1316 
1317 	if (task)
1318 		io_put_task(task, task_refs);
1319 }
1320 
1321 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1322 	__must_hold(&ctx->uring_lock)
1323 {
1324 	struct io_wq_work_node *node, *prev;
1325 	struct io_submit_state *state = &ctx->submit_state;
1326 
1327 	io_cq_lock(ctx);
1328 	wq_list_for_each(node, prev, &state->compl_reqs) {
1329 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1330 					    comp_list);
1331 
1332 		if (!(req->flags & REQ_F_CQE_SKIP))
1333 			__io_fill_cqe_req(ctx, req);
1334 	}
1335 	__io_cq_unlock_post(ctx);
1336 
1337 	io_free_batch_list(ctx, state->compl_reqs.first);
1338 	INIT_WQ_LIST(&state->compl_reqs);
1339 }
1340 
1341 /*
1342  * Drop reference to request, return next in chain (if there is one) if this
1343  * was the last reference to this request.
1344  */
1345 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
1346 {
1347 	struct io_kiocb *nxt = NULL;
1348 
1349 	if (req_ref_put_and_test(req)) {
1350 		if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
1351 			nxt = io_req_find_next(req);
1352 		io_free_req(req);
1353 	}
1354 	return nxt;
1355 }
1356 
1357 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1358 {
1359 	/* See comment at the top of this file */
1360 	smp_rmb();
1361 	return __io_cqring_events(ctx);
1362 }
1363 
1364 /*
1365  * We can't just wait for polled events to come to us, we have to actively
1366  * find and complete them.
1367  */
1368 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1369 {
1370 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
1371 		return;
1372 
1373 	mutex_lock(&ctx->uring_lock);
1374 	while (!wq_list_empty(&ctx->iopoll_list)) {
1375 		/* let it sleep and repeat later if can't complete a request */
1376 		if (io_do_iopoll(ctx, true) == 0)
1377 			break;
1378 		/*
1379 		 * Ensure we allow local-to-the-cpu processing to take place,
1380 		 * in this case we need to ensure that we reap all events.
1381 		 * Also let task_work, etc. progress by releasing the mutex
1382 		 */
1383 		if (need_resched()) {
1384 			mutex_unlock(&ctx->uring_lock);
1385 			cond_resched();
1386 			mutex_lock(&ctx->uring_lock);
1387 		}
1388 	}
1389 	mutex_unlock(&ctx->uring_lock);
1390 }
1391 
1392 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1393 {
1394 	unsigned int nr_events = 0;
1395 	int ret = 0;
1396 	unsigned long check_cq;
1397 
1398 	if (!io_allowed_run_tw(ctx))
1399 		return -EEXIST;
1400 
1401 	check_cq = READ_ONCE(ctx->check_cq);
1402 	if (unlikely(check_cq)) {
1403 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1404 			__io_cqring_overflow_flush(ctx, false);
1405 		/*
1406 		 * Similarly do not spin if we have not informed the user of any
1407 		 * dropped CQE.
1408 		 */
1409 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1410 			return -EBADR;
1411 	}
1412 	/*
1413 	 * Don't enter poll loop if we already have events pending.
1414 	 * If we do, we can potentially be spinning for commands that
1415 	 * already triggered a CQE (eg in error).
1416 	 */
1417 	if (io_cqring_events(ctx))
1418 		return 0;
1419 
1420 	do {
1421 		/*
1422 		 * If a submit got punted to a workqueue, we can have the
1423 		 * application entering polling for a command before it gets
1424 		 * issued. That app will hold the uring_lock for the duration
1425 		 * of the poll right here, so we need to take a breather every
1426 		 * now and then to ensure that the issue has a chance to add
1427 		 * the poll to the issued list. Otherwise we can spin here
1428 		 * forever, while the workqueue is stuck trying to acquire the
1429 		 * very same mutex.
1430 		 */
1431 		if (wq_list_empty(&ctx->iopoll_list) ||
1432 		    io_task_work_pending(ctx)) {
1433 			u32 tail = ctx->cached_cq_tail;
1434 
1435 			(void) io_run_local_work_locked(ctx);
1436 
1437 			if (task_work_pending(current) ||
1438 			    wq_list_empty(&ctx->iopoll_list)) {
1439 				mutex_unlock(&ctx->uring_lock);
1440 				io_run_task_work();
1441 				mutex_lock(&ctx->uring_lock);
1442 			}
1443 			/* some requests don't go through iopoll_list */
1444 			if (tail != ctx->cached_cq_tail ||
1445 			    wq_list_empty(&ctx->iopoll_list))
1446 				break;
1447 		}
1448 		ret = io_do_iopoll(ctx, !min);
1449 		if (ret < 0)
1450 			break;
1451 		nr_events += ret;
1452 		ret = 0;
1453 	} while (nr_events < min && !need_resched());
1454 
1455 	return ret;
1456 }
1457 
1458 void io_req_task_complete(struct io_kiocb *req, bool *locked)
1459 {
1460 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
1461 		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
1462 
1463 		req->cqe.flags |= io_put_kbuf(req, issue_flags);
1464 	}
1465 
1466 	if (*locked)
1467 		io_req_complete_defer(req);
1468 	else
1469 		io_req_complete_post(req);
1470 }
1471 
1472 /*
1473  * After the iocb has been issued, it's safe to be found on the poll list.
1474  * Adding the kiocb to the list AFTER submission ensures that we don't
1475  * find it from a io_do_iopoll() thread before the issuer is done
1476  * accessing the kiocb cookie.
1477  */
1478 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1479 {
1480 	struct io_ring_ctx *ctx = req->ctx;
1481 	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1482 
1483 	/* workqueue context doesn't hold uring_lock, grab it now */
1484 	if (unlikely(needs_lock))
1485 		mutex_lock(&ctx->uring_lock);
1486 
1487 	/*
1488 	 * Track whether we have multiple files in our lists. This will impact
1489 	 * how we do polling eventually, not spinning if we're on potentially
1490 	 * different devices.
1491 	 */
1492 	if (wq_list_empty(&ctx->iopoll_list)) {
1493 		ctx->poll_multi_queue = false;
1494 	} else if (!ctx->poll_multi_queue) {
1495 		struct io_kiocb *list_req;
1496 
1497 		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1498 					comp_list);
1499 		if (list_req->file != req->file)
1500 			ctx->poll_multi_queue = true;
1501 	}
1502 
1503 	/*
1504 	 * For fast devices, IO may have already completed. If it has, add
1505 	 * it to the front so we find it first.
1506 	 */
1507 	if (READ_ONCE(req->iopoll_completed))
1508 		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1509 	else
1510 		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1511 
1512 	if (unlikely(needs_lock)) {
1513 		/*
1514 		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
1515 		 * in sq thread task context or in io worker task context. If
1516 		 * current task context is sq thread, we don't need to check
1517 		 * whether should wake up sq thread.
1518 		 */
1519 		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1520 		    wq_has_sleeper(&ctx->sq_data->wait))
1521 			wake_up(&ctx->sq_data->wait);
1522 
1523 		mutex_unlock(&ctx->uring_lock);
1524 	}
1525 }
1526 
1527 static bool io_bdev_nowait(struct block_device *bdev)
1528 {
1529 	return !bdev || bdev_nowait(bdev);
1530 }
1531 
1532 /*
1533  * If we tracked the file through the SCM inflight mechanism, we could support
1534  * any file. For now, just ensure that anything potentially problematic is done
1535  * inline.
1536  */
1537 static bool __io_file_supports_nowait(struct file *file, umode_t mode)
1538 {
1539 	if (S_ISBLK(mode)) {
1540 		if (IS_ENABLED(CONFIG_BLOCK) &&
1541 		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
1542 			return true;
1543 		return false;
1544 	}
1545 	if (S_ISSOCK(mode))
1546 		return true;
1547 	if (S_ISREG(mode)) {
1548 		if (IS_ENABLED(CONFIG_BLOCK) &&
1549 		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
1550 		    !io_is_uring_fops(file))
1551 			return true;
1552 		return false;
1553 	}
1554 
1555 	/* any ->read/write should understand O_NONBLOCK */
1556 	if (file->f_flags & O_NONBLOCK)
1557 		return true;
1558 	return file->f_mode & FMODE_NOWAIT;
1559 }
1560 
1561 /*
1562  * If we tracked the file through the SCM inflight mechanism, we could support
1563  * any file. For now, just ensure that anything potentially problematic is done
1564  * inline.
1565  */
1566 unsigned int io_file_get_flags(struct file *file)
1567 {
1568 	umode_t mode = file_inode(file)->i_mode;
1569 	unsigned int res = 0;
1570 
1571 	if (S_ISREG(mode))
1572 		res |= FFS_ISREG;
1573 	if (__io_file_supports_nowait(file, mode))
1574 		res |= FFS_NOWAIT;
1575 	return res;
1576 }
1577 
1578 bool io_alloc_async_data(struct io_kiocb *req)
1579 {
1580 	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
1581 	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
1582 	if (req->async_data) {
1583 		req->flags |= REQ_F_ASYNC_DATA;
1584 		return false;
1585 	}
1586 	return true;
1587 }
1588 
1589 int io_req_prep_async(struct io_kiocb *req)
1590 {
1591 	const struct io_op_def *def = &io_op_defs[req->opcode];
1592 
1593 	/* assign early for deferred execution for non-fixed file */
1594 	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
1595 		req->file = io_file_get_normal(req, req->cqe.fd);
1596 	if (!def->prep_async)
1597 		return 0;
1598 	if (WARN_ON_ONCE(req_has_async_data(req)))
1599 		return -EFAULT;
1600 	if (!io_op_defs[req->opcode].manual_alloc) {
1601 		if (io_alloc_async_data(req))
1602 			return -EAGAIN;
1603 	}
1604 	return def->prep_async(req);
1605 }
1606 
1607 static u32 io_get_sequence(struct io_kiocb *req)
1608 {
1609 	u32 seq = req->ctx->cached_sq_head;
1610 	struct io_kiocb *cur;
1611 
1612 	/* need original cached_sq_head, but it was increased for each req */
1613 	io_for_each_link(cur, req)
1614 		seq--;
1615 	return seq;
1616 }
1617 
1618 static __cold void io_drain_req(struct io_kiocb *req)
1619 	__must_hold(&ctx->uring_lock)
1620 {
1621 	struct io_ring_ctx *ctx = req->ctx;
1622 	struct io_defer_entry *de;
1623 	int ret;
1624 	u32 seq = io_get_sequence(req);
1625 
1626 	/* Still need defer if there is pending req in defer list. */
1627 	spin_lock(&ctx->completion_lock);
1628 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1629 		spin_unlock(&ctx->completion_lock);
1630 queue:
1631 		ctx->drain_active = false;
1632 		io_req_task_queue(req);
1633 		return;
1634 	}
1635 	spin_unlock(&ctx->completion_lock);
1636 
1637 	io_prep_async_link(req);
1638 	de = kmalloc(sizeof(*de), GFP_KERNEL);
1639 	if (!de) {
1640 		ret = -ENOMEM;
1641 		io_req_complete_failed(req, ret);
1642 		return;
1643 	}
1644 
1645 	spin_lock(&ctx->completion_lock);
1646 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1647 		spin_unlock(&ctx->completion_lock);
1648 		kfree(de);
1649 		goto queue;
1650 	}
1651 
1652 	trace_io_uring_defer(req);
1653 	de->req = req;
1654 	de->seq = seq;
1655 	list_add_tail(&de->list, &ctx->defer_list);
1656 	spin_unlock(&ctx->completion_lock);
1657 }
1658 
1659 static void io_clean_op(struct io_kiocb *req)
1660 {
1661 	if (req->flags & REQ_F_BUFFER_SELECTED) {
1662 		spin_lock(&req->ctx->completion_lock);
1663 		io_put_kbuf_comp(req);
1664 		spin_unlock(&req->ctx->completion_lock);
1665 	}
1666 
1667 	if (req->flags & REQ_F_NEED_CLEANUP) {
1668 		const struct io_op_def *def = &io_op_defs[req->opcode];
1669 
1670 		if (def->cleanup)
1671 			def->cleanup(req);
1672 	}
1673 	if ((req->flags & REQ_F_POLLED) && req->apoll) {
1674 		kfree(req->apoll->double_poll);
1675 		kfree(req->apoll);
1676 		req->apoll = NULL;
1677 	}
1678 	if (req->flags & REQ_F_INFLIGHT) {
1679 		struct io_uring_task *tctx = req->task->io_uring;
1680 
1681 		atomic_dec(&tctx->inflight_tracked);
1682 	}
1683 	if (req->flags & REQ_F_CREDS)
1684 		put_cred(req->creds);
1685 	if (req->flags & REQ_F_ASYNC_DATA) {
1686 		kfree(req->async_data);
1687 		req->async_data = NULL;
1688 	}
1689 	req->flags &= ~IO_REQ_CLEAN_FLAGS;
1690 }
1691 
1692 static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
1693 {
1694 	if (req->file || !io_op_defs[req->opcode].needs_file)
1695 		return true;
1696 
1697 	if (req->flags & REQ_F_FIXED_FILE)
1698 		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1699 	else
1700 		req->file = io_file_get_normal(req, req->cqe.fd);
1701 
1702 	return !!req->file;
1703 }
1704 
1705 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1706 {
1707 	const struct io_op_def *def = &io_op_defs[req->opcode];
1708 	const struct cred *creds = NULL;
1709 	int ret;
1710 
1711 	if (unlikely(!io_assign_file(req, issue_flags)))
1712 		return -EBADF;
1713 
1714 	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1715 		creds = override_creds(req->creds);
1716 
1717 	if (!def->audit_skip)
1718 		audit_uring_entry(req->opcode);
1719 
1720 	ret = def->issue(req, issue_flags);
1721 
1722 	if (!def->audit_skip)
1723 		audit_uring_exit(!ret, ret);
1724 
1725 	if (creds)
1726 		revert_creds(creds);
1727 
1728 	if (ret == IOU_OK) {
1729 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1730 			io_req_complete_defer(req);
1731 		else
1732 			io_req_complete_post(req);
1733 	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
1734 		return ret;
1735 
1736 	/* If the op doesn't have a file, we're not polling for it */
1737 	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1738 		io_iopoll_req_issued(req, issue_flags);
1739 
1740 	return 0;
1741 }
1742 
1743 int io_poll_issue(struct io_kiocb *req, bool *locked)
1744 {
1745 	io_tw_lock(req->ctx, locked);
1746 	if (unlikely(req->task->flags & PF_EXITING))
1747 		return -EFAULT;
1748 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
1749 }
1750 
1751 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1752 {
1753 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1754 
1755 	req = io_put_req_find_next(req);
1756 	return req ? &req->work : NULL;
1757 }
1758 
1759 void io_wq_submit_work(struct io_wq_work *work)
1760 {
1761 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1762 	const struct io_op_def *def = &io_op_defs[req->opcode];
1763 	unsigned int issue_flags = IO_URING_F_UNLOCKED;
1764 	bool needs_poll = false;
1765 	int ret = 0, err = -ECANCELED;
1766 
1767 	/* one will be dropped by ->io_free_work() after returning to io-wq */
1768 	if (!(req->flags & REQ_F_REFCOUNT))
1769 		__io_req_set_refcount(req, 2);
1770 	else
1771 		req_ref_get(req);
1772 
1773 	io_arm_ltimeout(req);
1774 
1775 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1776 	if (work->flags & IO_WQ_WORK_CANCEL) {
1777 fail:
1778 		io_req_task_queue_fail(req, err);
1779 		return;
1780 	}
1781 	if (!io_assign_file(req, issue_flags)) {
1782 		err = -EBADF;
1783 		work->flags |= IO_WQ_WORK_CANCEL;
1784 		goto fail;
1785 	}
1786 
1787 	if (req->flags & REQ_F_FORCE_ASYNC) {
1788 		bool opcode_poll = def->pollin || def->pollout;
1789 
1790 		if (opcode_poll && file_can_poll(req->file)) {
1791 			needs_poll = true;
1792 			issue_flags |= IO_URING_F_NONBLOCK;
1793 		}
1794 	}
1795 
1796 	do {
1797 		ret = io_issue_sqe(req, issue_flags);
1798 		if (ret != -EAGAIN)
1799 			break;
1800 		/*
1801 		 * We can get EAGAIN for iopolled IO even though we're
1802 		 * forcing a sync submission from here, since we can't
1803 		 * wait for request slots on the block side.
1804 		 */
1805 		if (!needs_poll) {
1806 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1807 				break;
1808 			cond_resched();
1809 			continue;
1810 		}
1811 
1812 		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1813 			return;
1814 		/* aborted or ready, in either case retry blocking */
1815 		needs_poll = false;
1816 		issue_flags &= ~IO_URING_F_NONBLOCK;
1817 	} while (1);
1818 
1819 	/* avoid locking problems by failing it from a clean context */
1820 	if (ret < 0)
1821 		io_req_task_queue_fail(req, ret);
1822 }
1823 
1824 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1825 				      unsigned int issue_flags)
1826 {
1827 	struct io_ring_ctx *ctx = req->ctx;
1828 	struct file *file = NULL;
1829 	unsigned long file_ptr;
1830 
1831 	io_ring_submit_lock(ctx, issue_flags);
1832 
1833 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
1834 		goto out;
1835 	fd = array_index_nospec(fd, ctx->nr_user_files);
1836 	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
1837 	file = (struct file *) (file_ptr & FFS_MASK);
1838 	file_ptr &= ~FFS_MASK;
1839 	/* mask in overlapping REQ_F and FFS bits */
1840 	req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
1841 	io_req_set_rsrc_node(req, ctx, 0);
1842 out:
1843 	io_ring_submit_unlock(ctx, issue_flags);
1844 	return file;
1845 }
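
/*
 * Note on the file_ptr packing above (descriptive only): fixed file table
 * slots store the struct file pointer with a couple of per-file hint bits
 * (e.g. NOWAIT support) folded into the low, otherwise-unused pointer bits.
 * FFS_MASK recovers the pointer, and shifting the leftover bits by
 * REQ_F_SUPPORT_NOWAIT_BIT lines them up with the corresponding REQ_F_*
 * request flags, so they can be OR'ed straight into req->flags without a
 * per-file lookup at issue time.
 */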
1846 
1847 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1848 {
1849 	struct file *file = fget(fd);
1850 
1851 	trace_io_uring_file_get(req, fd);
1852 
1853 	/* we don't allow fixed io_uring files */
1854 	if (file && io_is_uring_fops(file))
1855 		io_req_track_inflight(req);
1856 	return file;
1857 }
1858 
1859 static void io_queue_async(struct io_kiocb *req, int ret)
1860 	__must_hold(&req->ctx->uring_lock)
1861 {
1862 	struct io_kiocb *linked_timeout;
1863 
1864 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1865 		io_req_complete_failed(req, ret);
1866 		return;
1867 	}
1868 
1869 	linked_timeout = io_prep_linked_timeout(req);
1870 
1871 	switch (io_arm_poll_handler(req, 0)) {
1872 	case IO_APOLL_READY:
1873 		io_kbuf_recycle(req, 0);
1874 		io_req_task_queue(req);
1875 		break;
1876 	case IO_APOLL_ABORTED:
1877 		io_kbuf_recycle(req, 0);
1878 		io_queue_iowq(req, NULL);
1879 		break;
1880 	case IO_APOLL_OK:
1881 		break;
1882 	}
1883 
1884 	if (linked_timeout)
1885 		io_queue_linked_timeout(linked_timeout);
1886 }
1887 
1888 static inline void io_queue_sqe(struct io_kiocb *req)
1889 	__must_hold(&req->ctx->uring_lock)
1890 {
1891 	int ret;
1892 
1893 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
1894 
1895 	/*
1896 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
1897 	 * doesn't support non-blocking read/write attempts
1898 	 */
1899 	if (likely(!ret))
1900 		io_arm_ltimeout(req);
1901 	else
1902 		io_queue_async(req, ret);
1903 }
1904 
1905 static void io_queue_sqe_fallback(struct io_kiocb *req)
1906 	__must_hold(&req->ctx->uring_lock)
1907 {
1908 	if (unlikely(req->flags & REQ_F_FAIL)) {
1909 		/*
1910 		 * We don't submit; fail them all. For that, replace hardlinks
1911 		 * with normal links. An extra REQ_F_LINK is tolerated.
1912 		 */
1913 		req->flags &= ~REQ_F_HARDLINK;
1914 		req->flags |= REQ_F_LINK;
1915 		io_req_complete_failed(req, req->cqe.res);
1916 	} else {
1917 		int ret = io_req_prep_async(req);
1918 
1919 		if (unlikely(ret)) {
1920 			io_req_complete_failed(req, ret);
1921 			return;
1922 		}
1923 
1924 		if (unlikely(req->ctx->drain_active))
1925 			io_drain_req(req);
1926 		else
1927 			io_queue_iowq(req, NULL);
1928 	}
1929 }
1930 
1931 /*
1932  * Check SQE restrictions (opcode and flags).
1933  *
1934  * Returns 'true' if SQE is allowed, 'false' otherwise.
1935  */
1936 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
1937 					struct io_kiocb *req,
1938 					unsigned int sqe_flags)
1939 {
1940 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
1941 		return false;
1942 
1943 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
1944 	    ctx->restrictions.sqe_flags_required)
1945 		return false;
1946 
1947 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
1948 			  ctx->restrictions.sqe_flags_required))
1949 		return false;
1950 
1951 	return true;
1952 }
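
/*
 * Userspace side, a minimal sketch (ring_fd is a placeholder, error handling
 * omitted, ring assumed created with IORING_SETUP_R_DISABLED): the bitmaps
 * checked above are filled from an array of struct io_uring_restriction
 * registered before the ring is enabled, e.g.:
 *
 *	struct io_uring_restriction res[2] = {};
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
 *	res[1].sqe_flags = IOSQE_FIXED_FILE;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */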
1953 
1954 static void io_init_req_drain(struct io_kiocb *req)
1955 {
1956 	struct io_ring_ctx *ctx = req->ctx;
1957 	struct io_kiocb *head = ctx->submit_state.link.head;
1958 
1959 	ctx->drain_active = true;
1960 	if (head) {
1961 		/*
1962 		 * If we need to drain a request in the middle of a link, drain
1963 		 * the head request and the next request/link after the current
1964 		 * link. Considering sequential execution of links,
1965 		 * REQ_F_IO_DRAIN will be maintained for every request of our
1966 		 * link.
1967 		 */
1968 		head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
1969 		ctx->drain_next = true;
1970 	}
1971 }
1972 
1973 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
1974 		       const struct io_uring_sqe *sqe)
1975 	__must_hold(&ctx->uring_lock)
1976 {
1977 	const struct io_op_def *def;
1978 	unsigned int sqe_flags;
1979 	int personality;
1980 	u8 opcode;
1981 
1982 	/* req is partially pre-initialised, see io_preinit_req() */
1983 	req->opcode = opcode = READ_ONCE(sqe->opcode);
1984 	/* same numerical values as the corresponding REQ_F_*, safe to copy */
1985 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
1986 	req->cqe.user_data = READ_ONCE(sqe->user_data);
1987 	req->file = NULL;
1988 	req->rsrc_node = NULL;
1989 	req->task = current;
1990 
1991 	if (unlikely(opcode >= IORING_OP_LAST)) {
1992 		req->opcode = 0;
1993 		return -EINVAL;
1994 	}
1995 	def = &io_op_defs[opcode];
1996 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
1997 		/* enforce forwards compatibility on users */
1998 		if (sqe_flags & ~SQE_VALID_FLAGS)
1999 			return -EINVAL;
2000 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
2001 			if (!def->buffer_select)
2002 				return -EOPNOTSUPP;
2003 			req->buf_index = READ_ONCE(sqe->buf_group);
2004 		}
2005 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2006 			ctx->drain_disabled = true;
2007 		if (sqe_flags & IOSQE_IO_DRAIN) {
2008 			if (ctx->drain_disabled)
2009 				return -EOPNOTSUPP;
2010 			io_init_req_drain(req);
2011 		}
2012 	}
2013 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2014 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2015 			return -EACCES;
2016 		/* knock it to the slow queue path, will be drained there */
2017 		if (ctx->drain_active)
2018 			req->flags |= REQ_F_FORCE_ASYNC;
2019 		/* if there is no link, we're at "next" request and need to drain */
2020 		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2021 			ctx->drain_next = false;
2022 			ctx->drain_active = true;
2023 			req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2024 		}
2025 	}
2026 
2027 	if (!def->ioprio && sqe->ioprio)
2028 		return -EINVAL;
2029 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2030 		return -EINVAL;
2031 
2032 	if (def->needs_file) {
2033 		struct io_submit_state *state = &ctx->submit_state;
2034 
2035 		req->cqe.fd = READ_ONCE(sqe->fd);
2036 
2037 		/*
2038 		 * Plug now if we have more than 2 IO left after this, and the
2039 		 * target is potentially a read/write to block based storage.
2040 		 */
2041 		if (state->need_plug && def->plug) {
2042 			state->plug_started = true;
2043 			state->need_plug = false;
2044 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2045 		}
2046 	}
2047 
2048 	personality = READ_ONCE(sqe->personality);
2049 	if (personality) {
2050 		int ret;
2051 
2052 		req->creds = xa_load(&ctx->personalities, personality);
2053 		if (!req->creds)
2054 			return -EINVAL;
2055 		get_cred(req->creds);
2056 		ret = security_uring_override_creds(req->creds);
2057 		if (ret) {
2058 			put_cred(req->creds);
2059 			return ret;
2060 		}
2061 		req->flags |= REQ_F_CREDS;
2062 	}
2063 
2064 	return def->prep(req, sqe);
2065 }
2066 
2067 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2068 				      struct io_kiocb *req, int ret)
2069 {
2070 	struct io_ring_ctx *ctx = req->ctx;
2071 	struct io_submit_link *link = &ctx->submit_state.link;
2072 	struct io_kiocb *head = link->head;
2073 
2074 	trace_io_uring_req_failed(sqe, req, ret);
2075 
2076 	/*
2077 	 * Avoid breaking links in the middle as it renders links with SQPOLL
2078 	 * unusable. Instead of failing eagerly, continue assembling the link if
2079 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2080 	 * should find the flag and handle the rest.
2081 	 */
2082 	req_fail_link_node(req, ret);
2083 	if (head && !(head->flags & REQ_F_FAIL))
2084 		req_fail_link_node(head, -ECANCELED);
2085 
2086 	if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2087 		if (head) {
2088 			link->last->link = req;
2089 			link->head = NULL;
2090 			req = head;
2091 		}
2092 		io_queue_sqe_fallback(req);
2093 		return ret;
2094 	}
2095 
2096 	if (head)
2097 		link->last->link = req;
2098 	else
2099 		link->head = req;
2100 	link->last = req;
2101 	return 0;
2102 }
2103 
2104 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2105 			 const struct io_uring_sqe *sqe)
2106 	__must_hold(&ctx->uring_lock)
2107 {
2108 	struct io_submit_link *link = &ctx->submit_state.link;
2109 	int ret;
2110 
2111 	ret = io_init_req(ctx, req, sqe);
2112 	if (unlikely(ret))
2113 		return io_submit_fail_init(sqe, req, ret);
2114 
2115 	/* don't need @sqe from now on */
2116 	trace_io_uring_submit_sqe(req, true);
2117 
2118 	/*
2119 	 * If we already have a head request, queue this one for async
2120 	 * submittal once the head completes. If we don't have a head but
2121 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2122 	 * submitted sync once the chain is complete. If none of those
2123 	 * conditions are true (normal request), then just queue it.
2124 	 */
2125 	if (unlikely(link->head)) {
2126 		ret = io_req_prep_async(req);
2127 		if (unlikely(ret))
2128 			return io_submit_fail_init(sqe, req, ret);
2129 
2130 		trace_io_uring_link(req, link->head);
2131 		link->last->link = req;
2132 		link->last = req;
2133 
2134 		if (req->flags & IO_REQ_LINK_FLAGS)
2135 			return 0;
2136 		/* last request of the link, flush it */
2137 		req = link->head;
2138 		link->head = NULL;
2139 		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2140 			goto fallback;
2141 
2142 	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2143 					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2144 		if (req->flags & IO_REQ_LINK_FLAGS) {
2145 			link->head = req;
2146 			link->last = req;
2147 		} else {
2148 fallback:
2149 			io_queue_sqe_fallback(req);
2150 		}
2151 		return 0;
2152 	}
2153 
2154 	io_queue_sqe(req);
2155 	return 0;
2156 }
2157 
2158 /*
2159  * Batched submission is done, ensure local IO is flushed out.
2160  */
2161 static void io_submit_state_end(struct io_ring_ctx *ctx)
2162 {
2163 	struct io_submit_state *state = &ctx->submit_state;
2164 
2165 	if (unlikely(state->link.head))
2166 		io_queue_sqe_fallback(state->link.head);
2167 	/* flush only after queuing links as they can generate completions */
2168 	io_submit_flush_completions(ctx);
2169 	if (state->plug_started)
2170 		blk_finish_plug(&state->plug);
2171 }
2172 
2173 /*
2174  * Start submission side cache.
2175  */
2176 static void io_submit_state_start(struct io_submit_state *state,
2177 				  unsigned int max_ios)
2178 {
2179 	state->plug_started = false;
2180 	state->need_plug = max_ios > 2;
2181 	state->submit_nr = max_ios;
2182 	/* set only head, no need to init link_last in advance */
2183 	state->link.head = NULL;
2184 }
2185 
2186 static void io_commit_sqring(struct io_ring_ctx *ctx)
2187 {
2188 	struct io_rings *rings = ctx->rings;
2189 
2190 	/*
2191 	 * Ensure any loads from the SQEs are done at this point,
2192 	 * since once we write the new head, the application could
2193 	 * write new data to them.
2194 	 */
2195 	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2196 }
2197 
2198 /*
2199  * Fetch an sqe, if one is available. Note this returns a pointer to memory
2200  * that is mapped by userspace. This means that care needs to be taken to
2201  * ensure that reads are stable, as we cannot rely on userspace always
2202  * being a good citizen. If members of the sqe are validated and then later
2203  * used, it's important that those reads are done through READ_ONCE() to
2204  * prevent a re-load down the line.
2205  */
2206 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2207 {
2208 	unsigned head, mask = ctx->sq_entries - 1;
2209 	unsigned sq_idx = ctx->cached_sq_head++ & mask;
2210 
2211 	/*
2212 	 * The cached sq head (or cq tail) serves two purposes:
2213 	 *
2214 	 * 1) allows us to batch the cost of updating the user visible
2215 	 *    head.
2216 	 * 2) allows the kernel side to track the head on its own, even
2217 	 *    though the application is the one updating it.
2218 	 */
2219 	head = READ_ONCE(ctx->sq_array[sq_idx]);
2220 	if (likely(head < ctx->sq_entries)) {
2221 		/* double index for 128-byte SQEs, twice as long */
2222 		if (ctx->flags & IORING_SETUP_SQE128)
2223 			head <<= 1;
2224 		return &ctx->sq_sqes[head];
2225 	}
2226 
2227 	/* drop invalid entries */
2228 	ctx->cq_extra--;
2229 	WRITE_ONCE(ctx->rings->sq_dropped,
2230 		   READ_ONCE(ctx->rings->sq_dropped) + 1);
2231 	return NULL;
2232 }
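
/*
 * Index math above, illustrated: with sq_entries == 8 the mask is 7, so a
 * cached_sq_head of 9 reads slot sq_array[1]. The value stored there by the
 * application is itself an index into the SQE array, which is why it is
 * range-checked against sq_entries before use; an out-of-range value is
 * counted in sq_dropped rather than submitted. With IORING_SETUP_SQE128
 * each logical SQE occupies two 64-byte slots, hence the head <<= 1.
 */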
2233 
2234 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2235 	__must_hold(&ctx->uring_lock)
2236 {
2237 	unsigned int entries = io_sqring_entries(ctx);
2238 	unsigned int left;
2239 	int ret;
2240 
2241 	if (unlikely(!entries))
2242 		return 0;
2243 	/* make sure SQ entry isn't read before tail */
2244 	ret = left = min3(nr, ctx->sq_entries, entries);
2245 	io_get_task_refs(left);
2246 	io_submit_state_start(&ctx->submit_state, left);
2247 
2248 	do {
2249 		const struct io_uring_sqe *sqe;
2250 		struct io_kiocb *req;
2251 
2252 		if (unlikely(!io_alloc_req_refill(ctx)))
2253 			break;
2254 		req = io_alloc_req(ctx);
2255 		sqe = io_get_sqe(ctx);
2256 		if (unlikely(!sqe)) {
2257 			io_req_add_to_cache(req, ctx);
2258 			break;
2259 		}
2260 
2261 		/*
2262 		 * Continue submitting even for sqe failure if the
2263 		 * ring was set up with IORING_SETUP_SUBMIT_ALL
2264 		 */
2265 		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2266 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2267 			left--;
2268 			break;
2269 		}
2270 	} while (--left);
2271 
2272 	if (unlikely(left)) {
2273 		ret -= left;
2274 		/* try again if it submitted nothing and can't allocate a req */
2275 		if (!ret && io_req_cache_empty(ctx))
2276 			ret = -EAGAIN;
2277 		current->io_uring->cached_refs += left;
2278 	}
2279 
2280 	io_submit_state_end(ctx);
2281 	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
2282 	io_commit_sqring(ctx);
2283 	return ret;
2284 }
2285 
2286 struct io_wait_queue {
2287 	struct wait_queue_entry wq;
2288 	struct io_ring_ctx *ctx;
2289 	unsigned cq_tail;
2290 	unsigned nr_timeouts;
2291 };
2292 
2293 static inline bool io_has_work(struct io_ring_ctx *ctx)
2294 {
2295 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
2296 	       ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
2297 		!llist_empty(&ctx->work_llist));
2298 }
2299 
2300 static inline bool io_should_wake(struct io_wait_queue *iowq)
2301 {
2302 	struct io_ring_ctx *ctx = iowq->ctx;
2303 	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2304 
2305 	/*
2306 	 * Wake up if we have enough events, or if a timeout occurred since we
2307 	 * started waiting. For timeouts, we always want to return to userspace,
2308 	 * regardless of event count.
2309 	 */
2310 	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2311 }
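
/*
 * The signed subtraction above copes with counter wrap: iowq->cq_tail is
 * primed below (in io_cqring_wait()) with cq.head + min_events, and both
 * values are free-running u32 counters. Illustrative example: with
 * cq.head == 0xfffffffe and min_events == 4, cq_tail wraps to 2; a current
 * cq.tail of 0xffffffff gives dist == -3 (keep waiting), while a wrapped
 * cq.tail of 2 gives dist == 0 and wakes the task.
 */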
2312 
2313 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2314 			    int wake_flags, void *key)
2315 {
2316 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2317 							wq);
2318 	struct io_ring_ctx *ctx = iowq->ctx;
2319 
2320 	/*
2321 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
2322 	 * the task, and the next invocation will do it.
2323 	 */
2324 	if (io_should_wake(iowq) || io_has_work(ctx))
2325 		return autoremove_wake_function(curr, mode, wake_flags, key);
2326 	return -1;
2327 }
2328 
2329 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2330 {
2331 	if (io_run_task_work_ctx(ctx) > 0)
2332 		return 1;
2333 	if (task_sigpending(current))
2334 		return -EINTR;
2335 	return 0;
2336 }
2337 
2338 /* when returns >0, the caller should retry */
2339 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2340 					  struct io_wait_queue *iowq,
2341 					  ktime_t *timeout)
2342 {
2343 	int ret;
2344 	unsigned long check_cq;
2345 
2346 	/* make sure we run task_work before checking for signals */
2347 	ret = io_run_task_work_sig(ctx);
2348 	if (ret || io_should_wake(iowq))
2349 		return ret;
2350 
2351 	check_cq = READ_ONCE(ctx->check_cq);
2352 	if (unlikely(check_cq)) {
2353 		/* let the caller flush overflows, retry */
2354 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2355 			return 1;
2356 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
2357 			return -EBADR;
2358 	}
2359 	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
2360 		return -ETIME;
2361 	return 1;
2362 }
2363 
2364 /*
2365  * Wait until events become available, if we don't already have some. The
2366  * application must reap them itself, as they reside on the shared cq ring.
2367  */
2368 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2369 			  const sigset_t __user *sig, size_t sigsz,
2370 			  struct __kernel_timespec __user *uts)
2371 {
2372 	struct io_wait_queue iowq;
2373 	struct io_rings *rings = ctx->rings;
2374 	ktime_t timeout = KTIME_MAX;
2375 	int ret;
2376 
2377 	if (!io_allowed_run_tw(ctx))
2378 		return -EEXIST;
2379 
2380 	do {
2381 		/* always run at least 1 task work to process local work */
2382 		ret = io_run_task_work_ctx(ctx);
2383 		if (ret < 0)
2384 			return ret;
2385 		io_cqring_overflow_flush(ctx);
2386 
2387 		/* if user messes with these they will just get an early return */
2388 		if (__io_cqring_events_user(ctx) >= min_events)
2389 			return 0;
2390 	} while (ret > 0);
2391 
2392 	if (sig) {
2393 #ifdef CONFIG_COMPAT
2394 		if (in_compat_syscall())
2395 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2396 						      sigsz);
2397 		else
2398 #endif
2399 			ret = set_user_sigmask(sig, sigsz);
2400 
2401 		if (ret)
2402 			return ret;
2403 	}
2404 
2405 	if (uts) {
2406 		struct timespec64 ts;
2407 
2408 		if (get_timespec64(&ts, uts))
2409 			return -EFAULT;
2410 		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
2411 	}
2412 
2413 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2414 	iowq.wq.private = current;
2415 	INIT_LIST_HEAD(&iowq.wq.entry);
2416 	iowq.ctx = ctx;
2417 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2418 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2419 
2420 	trace_io_uring_cqring_wait(ctx, min_events);
2421 	do {
2422 		/* if we can't even flush overflow, don't wait for more */
2423 		if (!io_cqring_overflow_flush(ctx)) {
2424 			ret = -EBUSY;
2425 			break;
2426 		}
2427 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2428 						TASK_INTERRUPTIBLE);
2429 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
2430 		cond_resched();
2431 	} while (ret > 0);
2432 
2433 	finish_wait(&ctx->cq_wait, &iowq.wq);
2434 	restore_saved_sigmask_unless(ret == -EINTR);
2435 
2436 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2437 }
2438 
2439 static void io_mem_free(void *ptr)
2440 {
2441 	struct page *page;
2442 
2443 	if (!ptr)
2444 		return;
2445 
2446 	page = virt_to_head_page(ptr);
2447 	if (put_page_testzero(page))
2448 		free_compound_page(page);
2449 }
2450 
2451 static void *io_mem_alloc(size_t size)
2452 {
2453 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2454 
2455 	return (void *) __get_free_pages(gfp, get_order(size));
2456 }
2457 
2458 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2459 				unsigned int cq_entries, size_t *sq_offset)
2460 {
2461 	struct io_rings *rings;
2462 	size_t off, sq_array_size;
2463 
2464 	off = struct_size(rings, cqes, cq_entries);
2465 	if (off == SIZE_MAX)
2466 		return SIZE_MAX;
2467 	if (ctx->flags & IORING_SETUP_CQE32) {
2468 		if (check_shl_overflow(off, 1, &off))
2469 			return SIZE_MAX;
2470 	}
2471 
2472 #ifdef CONFIG_SMP
2473 	off = ALIGN(off, SMP_CACHE_BYTES);
2474 	if (off == 0)
2475 		return SIZE_MAX;
2476 #endif
2477 
2478 	if (sq_offset)
2479 		*sq_offset = off;
2480 
2481 	sq_array_size = array_size(sizeof(u32), sq_entries);
2482 	if (sq_array_size == SIZE_MAX)
2483 		return SIZE_MAX;
2484 
2485 	if (check_add_overflow(off, sq_array_size, &off))
2486 		return SIZE_MAX;
2487 
2488 	return off;
2489 }
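
/*
 * Resulting layout, sketched for a hypothetical setup with sq_entries == 8
 * and cq_entries == 16 (no CQE32): the shared region starts with struct
 * io_rings followed by the 16-entry CQE array (struct_size() above), is
 * padded up to a cache line boundary, and ends with the 8-entry u32
 * sq_array whose offset is reported back through *sq_offset:
 *
 *	[ struct io_rings | cqes[16] | pad to SMP_CACHE_BYTES | sq_array[8] ]
 */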
2490 
2491 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2492 			       unsigned int eventfd_async)
2493 {
2494 	struct io_ev_fd *ev_fd;
2495 	__s32 __user *fds = arg;
2496 	int fd;
2497 
2498 	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2499 					lockdep_is_held(&ctx->uring_lock));
2500 	if (ev_fd)
2501 		return -EBUSY;
2502 
2503 	if (copy_from_user(&fd, fds, sizeof(*fds)))
2504 		return -EFAULT;
2505 
2506 	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
2507 	if (!ev_fd)
2508 		return -ENOMEM;
2509 
2510 	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
2511 	if (IS_ERR(ev_fd->cq_ev_fd)) {
2512 		int ret = PTR_ERR(ev_fd->cq_ev_fd);
2513 		kfree(ev_fd);
2514 		return ret;
2515 	}
2516 
2517 	spin_lock(&ctx->completion_lock);
2518 	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
2519 	spin_unlock(&ctx->completion_lock);
2520 
2521 	ev_fd->eventfd_async = eventfd_async;
2522 	ctx->has_evfd = true;
2523 	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
2524 	atomic_set(&ev_fd->refs, 1);
2525 	atomic_set(&ev_fd->ops, 0);
2526 	return 0;
2527 }
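
/*
 * Userspace side, a minimal sketch (ring_fd is a placeholder, error handling
 * omitted): the fd consumed above is handed in through io_uring_register()
 * as either IORING_REGISTER_EVENTFD or IORING_REGISTER_EVENTFD_ASYNC, the
 * latter setting eventfd_async so that, roughly, only completions of work
 * that went async trigger a notification:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_EVENTFD, &efd, 1);
 */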
2528 
2529 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2530 {
2531 	struct io_ev_fd *ev_fd;
2532 
2533 	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2534 					lockdep_is_held(&ctx->uring_lock));
2535 	if (ev_fd) {
2536 		ctx->has_evfd = false;
2537 		rcu_assign_pointer(ctx->io_ev_fd, NULL);
2538 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
2539 			call_rcu(&ev_fd->rcu, io_eventfd_ops);
2540 		return 0;
2541 	}
2542 
2543 	return -ENXIO;
2544 }
2545 
2546 static void io_req_caches_free(struct io_ring_ctx *ctx)
2547 {
2548 	int nr = 0;
2549 
2550 	mutex_lock(&ctx->uring_lock);
2551 	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
2552 
2553 	while (!io_req_cache_empty(ctx)) {
2554 		struct io_kiocb *req = io_alloc_req(ctx);
2555 
2556 		kmem_cache_free(req_cachep, req);
2557 		nr++;
2558 	}
2559 	if (nr)
2560 		percpu_ref_put_many(&ctx->refs, nr);
2561 	mutex_unlock(&ctx->uring_lock);
2562 }
2563 
2564 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2565 {
2566 	io_sq_thread_finish(ctx);
2567 	io_rsrc_refs_drop(ctx);
2568 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2569 	io_wait_rsrc_data(ctx->buf_data);
2570 	io_wait_rsrc_data(ctx->file_data);
2571 
2572 	mutex_lock(&ctx->uring_lock);
2573 	if (ctx->buf_data)
2574 		__io_sqe_buffers_unregister(ctx);
2575 	if (ctx->file_data)
2576 		__io_sqe_files_unregister(ctx);
2577 	if (ctx->rings)
2578 		__io_cqring_overflow_flush(ctx, true);
2579 	io_eventfd_unregister(ctx);
2580 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
2581 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2582 	mutex_unlock(&ctx->uring_lock);
2583 	io_destroy_buffers(ctx);
2584 	if (ctx->sq_creds)
2585 		put_cred(ctx->sq_creds);
2586 	if (ctx->submitter_task)
2587 		put_task_struct(ctx->submitter_task);
2588 
2589 	/* there are no registered resources left, nobody uses it */
2590 	if (ctx->rsrc_node)
2591 		io_rsrc_node_destroy(ctx->rsrc_node);
2592 	if (ctx->rsrc_backup_node)
2593 		io_rsrc_node_destroy(ctx->rsrc_backup_node);
2594 	flush_delayed_work(&ctx->rsrc_put_work);
2595 	flush_delayed_work(&ctx->fallback_work);
2596 
2597 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2598 	WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
2599 
2600 #if defined(CONFIG_UNIX)
2601 	if (ctx->ring_sock) {
2602 		ctx->ring_sock->file = NULL; /* so that iput() is called */
2603 		sock_release(ctx->ring_sock);
2604 	}
2605 #endif
2606 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2607 
2608 	if (ctx->mm_account) {
2609 		mmdrop(ctx->mm_account);
2610 		ctx->mm_account = NULL;
2611 	}
2612 	io_mem_free(ctx->rings);
2613 	io_mem_free(ctx->sq_sqes);
2614 
2615 	percpu_ref_exit(&ctx->refs);
2616 	free_uid(ctx->user);
2617 	io_req_caches_free(ctx);
2618 	if (ctx->hash_map)
2619 		io_wq_put_hash(ctx->hash_map);
2620 	kfree(ctx->cancel_table.hbs);
2621 	kfree(ctx->cancel_table_locked.hbs);
2622 	kfree(ctx->dummy_ubuf);
2623 	kfree(ctx->io_bl);
2624 	xa_destroy(&ctx->io_bl_xa);
2625 	kfree(ctx);
2626 }
2627 
2628 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2629 {
2630 	struct io_ring_ctx *ctx = file->private_data;
2631 	__poll_t mask = 0;
2632 
2633 	poll_wait(file, &ctx->cq_wait, wait);
2634 	/*
2635 	 * synchronizes with barrier from wq_has_sleeper call in
2636 	 * io_commit_cqring
2637 	 */
2638 	smp_rmb();
2639 	if (!io_sqring_full(ctx))
2640 		mask |= EPOLLOUT | EPOLLWRNORM;
2641 
2642 	/*
2643 	 * Don't flush cqring overflow list here, just do a simple check.
2644 	 * Otherwise there could possibly be an ABBA deadlock:
2645 	 *      CPU0                    CPU1
2646 	 *      ----                    ----
2647 	 * lock(&ctx->uring_lock);
2648 	 *                              lock(&ep->mtx);
2649 	 *                              lock(&ctx->uring_lock);
2650 	 * lock(&ep->mtx);
2651 	 *
2652 	 * Users may get EPOLLIN while seeing nothing in the cqring, which
2653 	 * pushes them to do the flush.
2654 	 */
2655 
2656 	if (io_cqring_events(ctx) || io_has_work(ctx))
2657 		mask |= EPOLLIN | EPOLLRDNORM;
2658 
2659 	return mask;
2660 }
2661 
2662 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
2663 {
2664 	const struct cred *creds;
2665 
2666 	creds = xa_erase(&ctx->personalities, id);
2667 	if (creds) {
2668 		put_cred(creds);
2669 		return 0;
2670 	}
2671 
2672 	return -EINVAL;
2673 }
2674 
2675 struct io_tctx_exit {
2676 	struct callback_head		task_work;
2677 	struct completion		completion;
2678 	struct io_ring_ctx		*ctx;
2679 };
2680 
2681 static __cold void io_tctx_exit_cb(struct callback_head *cb)
2682 {
2683 	struct io_uring_task *tctx = current->io_uring;
2684 	struct io_tctx_exit *work;
2685 
2686 	work = container_of(cb, struct io_tctx_exit, task_work);
2687 	/*
2688 	 * When @in_idle, we're in cancellation and it's racy to remove the
2689 	 * node. It'll be removed by the end of cancellation, just ignore it.
2690 	 * tctx can be NULL if the queueing of this task_work raced with
2691 	 * work cancelation off the exec path.
2692 	 */
2693 	if (tctx && !atomic_read(&tctx->in_idle))
2694 		io_uring_del_tctx_node((unsigned long)work->ctx);
2695 	complete(&work->completion);
2696 }
2697 
2698 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
2699 {
2700 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2701 
2702 	return req->ctx == data;
2703 }
2704 
2705 static __cold void io_ring_exit_work(struct work_struct *work)
2706 {
2707 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2708 	unsigned long timeout = jiffies + HZ * 60 * 5;
2709 	unsigned long interval = HZ / 20;
2710 	struct io_tctx_exit exit;
2711 	struct io_tctx_node *node;
2712 	int ret;
2713 
2714 	/*
2715 	 * If we're doing polled IO and end up having requests being
2716 	 * submitted async (out-of-line), then completions can come in while
2717 	 * we're waiting for refs to drop. We need to reap these manually,
2718 	 * as nobody else will be looking for them.
2719 	 */
2720 	do {
2721 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2722 			io_move_task_work_from_local(ctx);
2723 
2724 		while (io_uring_try_cancel_requests(ctx, NULL, true))
2725 			cond_resched();
2726 
2727 		if (ctx->sq_data) {
2728 			struct io_sq_data *sqd = ctx->sq_data;
2729 			struct task_struct *tsk;
2730 
2731 			io_sq_thread_park(sqd);
2732 			tsk = sqd->thread;
2733 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
2734 				io_wq_cancel_cb(tsk->io_uring->io_wq,
2735 						io_cancel_ctx_cb, ctx, true);
2736 			io_sq_thread_unpark(sqd);
2737 		}
2738 
2739 		io_req_caches_free(ctx);
2740 
2741 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
2742 			/* there is little hope left, don't run it too often */
2743 			interval = HZ * 60;
2744 		}
2745 	} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
2746 
2747 	init_completion(&exit.completion);
2748 	init_task_work(&exit.task_work, io_tctx_exit_cb);
2749 	exit.ctx = ctx;
2750 	/*
2751 	 * Some may use context even when all refs and requests have been put,
2752 	 * and they are free to do so while still holding uring_lock or
2753 	 * completion_lock, see io_req_task_submit(). Apart from other work,
2754 	 * this lock/unlock section also waits for them to finish.
2755 	 */
2756 	mutex_lock(&ctx->uring_lock);
2757 	while (!list_empty(&ctx->tctx_list)) {
2758 		WARN_ON_ONCE(time_after(jiffies, timeout));
2759 
2760 		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
2761 					ctx_node);
2762 		/* don't spin on a single task if cancellation failed */
2763 		list_rotate_left(&ctx->tctx_list);
2764 		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
2765 		if (WARN_ON_ONCE(ret))
2766 			continue;
2767 
2768 		mutex_unlock(&ctx->uring_lock);
2769 		wait_for_completion(&exit.completion);
2770 		mutex_lock(&ctx->uring_lock);
2771 	}
2772 	mutex_unlock(&ctx->uring_lock);
2773 	spin_lock(&ctx->completion_lock);
2774 	spin_unlock(&ctx->completion_lock);
2775 
2776 	io_ring_ctx_free(ctx);
2777 }
2778 
2779 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2780 {
2781 	unsigned long index;
2782 	struct creds *creds;
2783 
2784 	mutex_lock(&ctx->uring_lock);
2785 	percpu_ref_kill(&ctx->refs);
2786 	if (ctx->rings)
2787 		__io_cqring_overflow_flush(ctx, true);
2788 	xa_for_each(&ctx->personalities, index, creds)
2789 		io_unregister_personality(ctx, index);
2790 	if (ctx->rings)
2791 		io_poll_remove_all(ctx, NULL, true);
2792 	mutex_unlock(&ctx->uring_lock);
2793 
2794 	/*
2795 	 * If we failed setting up the ctx, we might not have any rings
2796 	 * and therefore did not submit any requests
2797 	 */
2798 	if (ctx->rings)
2799 		io_kill_timeouts(ctx, NULL, true);
2800 
2801 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
2802 	/*
2803 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
2804 	 * if we're exiting a ton of rings at the same time. It just adds
2805 	 * noise and overhead, and there's no discernible change in runtime
2806 	 * over using system_wq.
2807 	 */
2808 	queue_work(system_unbound_wq, &ctx->exit_work);
2809 }
2810 
2811 static int io_uring_release(struct inode *inode, struct file *file)
2812 {
2813 	struct io_ring_ctx *ctx = file->private_data;
2814 
2815 	file->private_data = NULL;
2816 	io_ring_ctx_wait_and_kill(ctx);
2817 	return 0;
2818 }
2819 
2820 struct io_task_cancel {
2821 	struct task_struct *task;
2822 	bool all;
2823 };
2824 
2825 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
2826 {
2827 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2828 	struct io_task_cancel *cancel = data;
2829 
2830 	return io_match_task_safe(req, cancel->task, cancel->all);
2831 }
2832 
2833 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
2834 					 struct task_struct *task,
2835 					 bool cancel_all)
2836 {
2837 	struct io_defer_entry *de;
2838 	LIST_HEAD(list);
2839 
2840 	spin_lock(&ctx->completion_lock);
2841 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
2842 		if (io_match_task_safe(de->req, task, cancel_all)) {
2843 			list_cut_position(&list, &ctx->defer_list, &de->list);
2844 			break;
2845 		}
2846 	}
2847 	spin_unlock(&ctx->completion_lock);
2848 	if (list_empty(&list))
2849 		return false;
2850 
2851 	while (!list_empty(&list)) {
2852 		de = list_first_entry(&list, struct io_defer_entry, list);
2853 		list_del_init(&de->list);
2854 		io_req_task_queue_fail(de->req, -ECANCELED);
2855 		kfree(de);
2856 	}
2857 	return true;
2858 }
2859 
2860 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
2861 {
2862 	struct io_tctx_node *node;
2863 	enum io_wq_cancel cret;
2864 	bool ret = false;
2865 
2866 	mutex_lock(&ctx->uring_lock);
2867 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
2868 		struct io_uring_task *tctx = node->task->io_uring;
2869 
2870 		/*
2871 		 * io_wq will stay alive while we hold uring_lock, because it's
2872 		 * killed after ctx nodes, which requires taking the lock.
2873 		 */
2874 		if (!tctx || !tctx->io_wq)
2875 			continue;
2876 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
2877 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
2878 	}
2879 	mutex_unlock(&ctx->uring_lock);
2880 
2881 	return ret;
2882 }
2883 
2884 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
2885 						struct task_struct *task,
2886 						bool cancel_all)
2887 {
2888 	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
2889 	struct io_uring_task *tctx = task ? task->io_uring : NULL;
2890 	enum io_wq_cancel cret;
2891 	bool ret = false;
2892 
2893 	/* failed during ring init, it couldn't have issued any requests */
2894 	if (!ctx->rings)
2895 		return false;
2896 
2897 	if (!task) {
2898 		ret |= io_uring_try_cancel_iowq(ctx);
2899 	} else if (tctx && tctx->io_wq) {
2900 		/*
2901 		 * Cancels requests of all rings, not only @ctx, but
2902 		 * it's fine as the task is in exit/exec.
2903 		 */
2904 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
2905 				       &cancel, true);
2906 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
2907 	}
2908 
2909 	/* SQPOLL thread does its own polling */
2910 	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
2911 	    (ctx->sq_data && ctx->sq_data->thread == current)) {
2912 		while (!wq_list_empty(&ctx->iopoll_list)) {
2913 			io_iopoll_try_reap_events(ctx);
2914 			ret = true;
2915 		}
2916 	}
2917 
2918 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2919 		ret |= io_run_local_work(ctx) > 0;
2920 	ret |= io_cancel_defer_files(ctx, task, cancel_all);
2921 	mutex_lock(&ctx->uring_lock);
2922 	ret |= io_poll_remove_all(ctx, task, cancel_all);
2923 	mutex_unlock(&ctx->uring_lock);
2924 	ret |= io_kill_timeouts(ctx, task, cancel_all);
2925 	if (task)
2926 		ret |= io_run_task_work() > 0;
2927 	return ret;
2928 }
2929 
2930 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
2931 {
2932 	if (tracked)
2933 		return atomic_read(&tctx->inflight_tracked);
2934 	return percpu_counter_sum(&tctx->inflight);
2935 }
2936 
2937 /*
2938  * Find any io_uring ctx that this task has registered or done IO on, and cancel
2939  * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
2940  */
2941 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
2942 {
2943 	struct io_uring_task *tctx = current->io_uring;
2944 	struct io_ring_ctx *ctx;
2945 	s64 inflight;
2946 	DEFINE_WAIT(wait);
2947 
2948 	WARN_ON_ONCE(sqd && sqd->thread != current);
2949 
2950 	if (!current->io_uring)
2951 		return;
2952 	if (tctx->io_wq)
2953 		io_wq_exit_start(tctx->io_wq);
2954 
2955 	atomic_inc(&tctx->in_idle);
2956 	do {
2957 		bool loop = false;
2958 
2959 		io_uring_drop_tctx_refs(current);
2960 		/* read completions before cancelations */
2961 		inflight = tctx_inflight(tctx, !cancel_all);
2962 		if (!inflight)
2963 			break;
2964 
2965 		if (!sqd) {
2966 			struct io_tctx_node *node;
2967 			unsigned long index;
2968 
2969 			xa_for_each(&tctx->xa, index, node) {
2970 				/* sqpoll task will cancel all its requests */
2971 				if (node->ctx->sq_data)
2972 					continue;
2973 				loop |= io_uring_try_cancel_requests(node->ctx,
2974 							current, cancel_all);
2975 			}
2976 		} else {
2977 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
2978 				loop |= io_uring_try_cancel_requests(ctx,
2979 								     current,
2980 								     cancel_all);
2981 		}
2982 
2983 		if (loop) {
2984 			cond_resched();
2985 			continue;
2986 		}
2987 
2988 		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
2989 		io_run_task_work();
2990 		io_uring_drop_tctx_refs(current);
2991 
2992 		/*
2993 		 * If we've seen completions, retry without waiting. This
2994 		 * avoids a race where a completion comes in before we did
2995 		 * prepare_to_wait().
2996 		 */
2997 		if (inflight == tctx_inflight(tctx, !cancel_all))
2998 			schedule();
2999 		finish_wait(&tctx->wait, &wait);
3000 	} while (1);
3001 
3002 	io_uring_clean_tctx(tctx);
3003 	if (cancel_all) {
3004 		/*
3005 		 * We shouldn't run task_works after cancel, so just leave
3006 		 * ->in_idle set for normal exit.
3007 		 */
3008 		atomic_dec(&tctx->in_idle);
3009 		/* for exec all current's requests should be gone, kill tctx */
3010 		__io_uring_free(current);
3011 	}
3012 }
3013 
3014 void __io_uring_cancel(bool cancel_all)
3015 {
3016 	io_uring_cancel_generic(cancel_all, NULL);
3017 }
3018 
3019 static void *io_uring_validate_mmap_request(struct file *file,
3020 					    loff_t pgoff, size_t sz)
3021 {
3022 	struct io_ring_ctx *ctx = file->private_data;
3023 	loff_t offset = pgoff << PAGE_SHIFT;
3024 	struct page *page;
3025 	void *ptr;
3026 
3027 	switch (offset) {
3028 	case IORING_OFF_SQ_RING:
3029 	case IORING_OFF_CQ_RING:
3030 		ptr = ctx->rings;
3031 		break;
3032 	case IORING_OFF_SQES:
3033 		ptr = ctx->sq_sqes;
3034 		break;
3035 	default:
3036 		return ERR_PTR(-EINVAL);
3037 	}
3038 
3039 	page = virt_to_head_page(ptr);
3040 	if (sz > page_size(page))
3041 		return ERR_PTR(-EINVAL);
3042 
3043 	return ptr;
3044 }
3045 
3046 #ifdef CONFIG_MMU
3047 
3048 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3049 {
3050 	size_t sz = vma->vm_end - vma->vm_start;
3051 	unsigned long pfn;
3052 	void *ptr;
3053 
3054 	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
3055 	if (IS_ERR(ptr))
3056 		return PTR_ERR(ptr);
3057 
3058 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3059 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3060 }
3061 
3062 #else /* !CONFIG_MMU */
3063 
3064 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3065 {
3066 	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
3067 }
3068 
3069 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
3070 {
3071 	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
3072 }
3073 
3074 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
3075 	unsigned long addr, unsigned long len,
3076 	unsigned long pgoff, unsigned long flags)
3077 {
3078 	void *ptr;
3079 
3080 	ptr = io_uring_validate_mmap_request(file, pgoff, len);
3081 	if (IS_ERR(ptr))
3082 		return PTR_ERR(ptr);
3083 
3084 	return (unsigned long) ptr;
3085 }
3086 
3087 #endif /* !CONFIG_MMU */
3088 
3089 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3090 {
3091 	if (flags & IORING_ENTER_EXT_ARG) {
3092 		struct io_uring_getevents_arg arg;
3093 
3094 		if (argsz != sizeof(arg))
3095 			return -EINVAL;
3096 		if (copy_from_user(&arg, argp, sizeof(arg)))
3097 			return -EFAULT;
3098 	}
3099 	return 0;
3100 }
3101 
3102 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3103 			  struct __kernel_timespec __user **ts,
3104 			  const sigset_t __user **sig)
3105 {
3106 	struct io_uring_getevents_arg arg;
3107 
3108 	/*
3109 	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3110 	 * is just a pointer to the sigset_t.
3111 	 */
3112 	if (!(flags & IORING_ENTER_EXT_ARG)) {
3113 		*sig = (const sigset_t __user *) argp;
3114 		*ts = NULL;
3115 		return 0;
3116 	}
3117 
3118 	/*
3119 	 * EXT_ARG is set - ensure we agree on its size, then copy in the
3120 	 * timespec and sigset_t pointers if everything checks out.
3121 	 */
3122 	if (*argsz != sizeof(arg))
3123 		return -EINVAL;
3124 	if (copy_from_user(&arg, argp, sizeof(arg)))
3125 		return -EFAULT;
3126 	if (arg.pad)
3127 		return -EINVAL;
3128 	*sig = u64_to_user_ptr(arg.sigmask);
3129 	*argsz = arg.sigmask_sz;
3130 	*ts = u64_to_user_ptr(arg.ts);
3131 	return 0;
3132 }
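
/*
 * Userspace side, a minimal sketch (ring_fd, sigmask and ts are placeholders,
 * error handling omitted): with IORING_ENTER_EXT_ARG the final syscall
 * argument carries sizeof(struct io_uring_getevents_arg) rather than a
 * sigset size, and the struct bundles both pointers unpacked above:
 *
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(uintptr_t)&sigmask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(uintptr_t)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */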
3133 
3134 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3135 		u32, min_complete, u32, flags, const void __user *, argp,
3136 		size_t, argsz)
3137 {
3138 	struct io_ring_ctx *ctx;
3139 	struct fd f;
3140 	long ret;
3141 
3142 	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3143 			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3144 			       IORING_ENTER_REGISTERED_RING)))
3145 		return -EINVAL;
3146 
3147 	/*
3148 	 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
3149 	 * need only dereference our task private array to find it.
3150 	 */
3151 	if (flags & IORING_ENTER_REGISTERED_RING) {
3152 		struct io_uring_task *tctx = current->io_uring;
3153 
3154 		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3155 			return -EINVAL;
3156 		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
3157 		f.file = tctx->registered_rings[fd];
3158 		f.flags = 0;
3159 		if (unlikely(!f.file))
3160 			return -EBADF;
3161 	} else {
3162 		f = fdget(fd);
3163 		if (unlikely(!f.file))
3164 			return -EBADF;
3165 		ret = -EOPNOTSUPP;
3166 		if (unlikely(!io_is_uring_fops(f.file)))
3167 			goto out;
3168 	}
3169 
3170 	ctx = f.file->private_data;
3171 	ret = -EBADFD;
3172 	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3173 		goto out;
3174 
3175 	/*
3176 	 * For SQ polling, the thread will do all submissions and completions.
3177 	 * Just return the requested submit count, and wake the thread if
3178 	 * we were asked to.
3179 	 */
3180 	ret = 0;
3181 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3182 		io_cqring_overflow_flush(ctx);
3183 
3184 		if (unlikely(ctx->sq_data->thread == NULL)) {
3185 			ret = -EOWNERDEAD;
3186 			goto out;
3187 		}
3188 		if (flags & IORING_ENTER_SQ_WAKEUP)
3189 			wake_up(&ctx->sq_data->wait);
3190 		if (flags & IORING_ENTER_SQ_WAIT) {
3191 			ret = io_sqpoll_wait_sq(ctx);
3192 			if (ret)
3193 				goto out;
3194 		}
3195 		ret = to_submit;
3196 	} else if (to_submit) {
3197 		ret = io_uring_add_tctx_node(ctx);
3198 		if (unlikely(ret))
3199 			goto out;
3200 
3201 		mutex_lock(&ctx->uring_lock);
3202 		ret = io_submit_sqes(ctx, to_submit);
3203 		if (ret != to_submit) {
3204 			mutex_unlock(&ctx->uring_lock);
3205 			goto out;
3206 		}
3207 		if (flags & IORING_ENTER_GETEVENTS) {
3208 			if (ctx->syscall_iopoll)
3209 				goto iopoll_locked;
3210 			/*
3211 			 * Ignore errors, we'll soon call io_cqring_wait() and
3212 			 * it should handle ownership problems if any.
3213 			 */
3214 			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3215 				(void)io_run_local_work_locked(ctx);
3216 		}
3217 		mutex_unlock(&ctx->uring_lock);
3218 	}
3219 
3220 	if (flags & IORING_ENTER_GETEVENTS) {
3221 		int ret2;
3222 
3223 		if (ctx->syscall_iopoll) {
3224 			/*
3225 			 * We disallow the app entering submit/complete with
3226 			 * polling, but we still need to lock the ring to
3227 			 * prevent racing with polled issue that got punted to
3228 			 * a workqueue.
3229 			 */
3230 			mutex_lock(&ctx->uring_lock);
3231 iopoll_locked:
3232 			ret2 = io_validate_ext_arg(flags, argp, argsz);
3233 			if (likely(!ret2)) {
3234 				min_complete = min(min_complete,
3235 						   ctx->cq_entries);
3236 				ret2 = io_iopoll_check(ctx, min_complete);
3237 			}
3238 			mutex_unlock(&ctx->uring_lock);
3239 		} else {
3240 			const sigset_t __user *sig;
3241 			struct __kernel_timespec __user *ts;
3242 
3243 			ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3244 			if (likely(!ret2)) {
3245 				min_complete = min(min_complete,
3246 						   ctx->cq_entries);
3247 				ret2 = io_cqring_wait(ctx, min_complete, sig,
3248 						      argsz, ts);
3249 			}
3250 		}
3251 
3252 		if (!ret) {
3253 			ret = ret2;
3254 
3255 			/*
3256 			 * EBADR indicates that one or more CQEs were dropped.
3257 			 * Once the user has been informed we can clear the bit
3258 			 * as they are obviously ok with those drops.
3259 			 */
3260 			if (unlikely(ret2 == -EBADR))
3261 				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
3262 					  &ctx->check_cq);
3263 		}
3264 	}
3265 out:
3266 	fdput(f);
3267 	return ret;
3268 }
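
/*
 * Typical call pattern from userspace, a minimal sketch of what a
 * submit-and-wait helper (e.g. liburing's) roughly boils down to; ring_fd
 * and to_submit are placeholders, error handling omitted. Submit whatever
 * is pending in the SQ ring and block until at least one CQE is available:
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd,
 *			  to_submit, 1, IORING_ENTER_GETEVENTS,
 *			  NULL, 0);
 */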
3269 
3270 static const struct file_operations io_uring_fops = {
3271 	.release	= io_uring_release,
3272 	.mmap		= io_uring_mmap,
3273 #ifndef CONFIG_MMU
3274 	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
3275 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
3276 #endif
3277 	.poll		= io_uring_poll,
3278 #ifdef CONFIG_PROC_FS
3279 	.show_fdinfo	= io_uring_show_fdinfo,
3280 #endif
3281 };
3282 
3283 bool io_is_uring_fops(struct file *file)
3284 {
3285 	return file->f_op == &io_uring_fops;
3286 }
3287 
3288 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3289 					 struct io_uring_params *p)
3290 {
3291 	struct io_rings *rings;
3292 	size_t size, sq_array_offset;
3293 
3294 	/* make sure these are sane, as we already accounted them */
3295 	ctx->sq_entries = p->sq_entries;
3296 	ctx->cq_entries = p->cq_entries;
3297 
3298 	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3299 	if (size == SIZE_MAX)
3300 		return -EOVERFLOW;
3301 
3302 	rings = io_mem_alloc(size);
3303 	if (!rings)
3304 		return -ENOMEM;
3305 
3306 	ctx->rings = rings;
3307 	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3308 	rings->sq_ring_mask = p->sq_entries - 1;
3309 	rings->cq_ring_mask = p->cq_entries - 1;
3310 	rings->sq_ring_entries = p->sq_entries;
3311 	rings->cq_ring_entries = p->cq_entries;
3312 
3313 	if (p->flags & IORING_SETUP_SQE128)
3314 		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3315 	else
3316 		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3317 	if (size == SIZE_MAX) {
3318 		io_mem_free(ctx->rings);
3319 		ctx->rings = NULL;
3320 		return -EOVERFLOW;
3321 	}
3322 
3323 	ctx->sq_sqes = io_mem_alloc(size);
3324 	if (!ctx->sq_sqes) {
3325 		io_mem_free(ctx->rings);
3326 		ctx->rings = NULL;
3327 		return -ENOMEM;
3328 	}
3329 
3330 	return 0;
3331 }
3332 
3333 static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
3334 {
3335 	int ret, fd;
3336 
3337 	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3338 	if (fd < 0)
3339 		return fd;
3340 
3341 	ret = __io_uring_add_tctx_node(ctx);
3342 	if (ret) {
3343 		put_unused_fd(fd);
3344 		return ret;
3345 	}
3346 	fd_install(fd, file);
3347 	return fd;
3348 }
3349 
3350 /*
3351  * Allocate an anonymous fd, this is what constitutes the application
3352  * visible backing of an io_uring instance. The application mmaps this
3353  * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3354  * we have to tie this fd to a socket for file garbage collection purposes.
3355  */
3356 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3357 {
3358 	struct file *file;
3359 #if defined(CONFIG_UNIX)
3360 	int ret;
3361 
3362 	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3363 				&ctx->ring_sock);
3364 	if (ret)
3365 		return ERR_PTR(ret);
3366 #endif
3367 
3368 	file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
3369 					 O_RDWR | O_CLOEXEC, NULL);
3370 #if defined(CONFIG_UNIX)
3371 	if (IS_ERR(file)) {
3372 		sock_release(ctx->ring_sock);
3373 		ctx->ring_sock = NULL;
3374 	} else {
3375 		ctx->ring_sock->file = file;
3376 	}
3377 #endif
3378 	return file;
3379 }
3380 
3381 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3382 				  struct io_uring_params __user *params)
3383 {
3384 	struct io_ring_ctx *ctx;
3385 	struct file *file;
3386 	int ret;
3387 
3388 	if (!entries)
3389 		return -EINVAL;
3390 	if (entries > IORING_MAX_ENTRIES) {
3391 		if (!(p->flags & IORING_SETUP_CLAMP))
3392 			return -EINVAL;
3393 		entries = IORING_MAX_ENTRIES;
3394 	}
3395 
3396 	/*
3397 	 * Use twice as many entries for the CQ ring. It's possible for the
3398 	 * application to drive a higher depth than the size of the SQ ring,
3399 	 * since the sqes are only used at submission time. This allows for
3400 	 * some flexibility in overcommitting a bit. If the application has
3401 	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
3402 	 * of CQ ring entries manually.
3403 	 */
3404 	p->sq_entries = roundup_pow_of_two(entries);
3405 	if (p->flags & IORING_SETUP_CQSIZE) {
3406 		/*
3407 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
3408 		 * to a power-of-two, if it isn't already. We do NOT impose
3409 		 * any cq vs sq ring sizing.
3410 		 */
3411 		if (!p->cq_entries)
3412 			return -EINVAL;
3413 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
3414 			if (!(p->flags & IORING_SETUP_CLAMP))
3415 				return -EINVAL;
3416 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
3417 		}
3418 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
3419 		if (p->cq_entries < p->sq_entries)
3420 			return -EINVAL;
3421 	} else {
3422 		p->cq_entries = 2 * p->sq_entries;
3423 	}
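	/*
	 * Worked example (illustrative): entries == 100 rounds the SQ ring up
	 * to 128 and, without IORING_SETUP_CQSIZE, the CQ ring to 256. With
	 * IORING_SETUP_CQSIZE and cq_entries == 100, the CQ ring is rounded
	 * up to 128 instead, which is accepted since it is still >= the
	 * rounded sq_entries.
	 */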
3424 
3425 	ctx = io_ring_ctx_alloc(p);
3426 	if (!ctx)
3427 		return -ENOMEM;
3428 
3429 	/*
3430 	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
3431 	 * space applications don't need to do io completion events
3432 	 * polling again, they can rely on io_sq_thread to do polling
3433 	 * work, which can reduce cpu usage and uring_lock contention.
3434 	 */
	if (ctx->flags & IORING_SETUP_IOPOLL &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->syscall_iopoll = 1;

	ctx->compat = in_compat_syscall();
	if (!capable(CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
	 */
	ret = -EINVAL;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* IPI related flags don't make sense with SQPOLL */
		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
				  IORING_SETUP_TASKRUN_FLAG |
				  IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL;
	}

	/*
	 * For DEFER_TASKRUN we require the completion task to be the same as the
	 * submission task. This implies that there is only one submitter, so enforce
	 * that.
	 */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
		goto err;
	}
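
	/*
	 * Illustrative flag combinations (userspace sketch, hypothetical):
	 * DEFER_TASKRUN must be paired with SINGLE_ISSUER,
	 *
	 *	p.flags = IORING_SETUP_SINGLE_ISSUER |
	 *		  IORING_SETUP_DEFER_TASKRUN;
	 *
	 * while COOP_TASKRUN (optionally with TASKRUN_FLAG) suppresses IPIs
	 * for a non-SQPOLL ring:
	 *
	 *	p.flags = IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG;
	 */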

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;
	/* always set a rsrc node */
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		goto err;
	io_rsrc_node_switch(ctx, NULL);

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
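
	/*
	 * Userspace sketch (illustrative): these offsets are relative to the
	 * start of the respective ring mapping, so an application would
	 * typically derive its shared-ring pointers roughly like this:
	 *
	 *	khead = (unsigned *)((char *)sq_ring + p.sq_off.head);
	 *	ktail = (unsigned *)((char *)sq_ring + p.sq_off.tail);
	 *	array = (unsigned *)((char *)sq_ring + p.sq_off.array);
	 *	cqes  = (struct io_uring_cqe *)((char *)cq_ring + p.cq_off.cqes);
	 */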

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
			IORING_FEAT_LINKED_FILE;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
		ctx->submitter_task = get_task_struct(current);

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
		/* fput will clean it up */
		fput(file);
		return ret;
	}

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

/*
 * Sets up an io_uring context and returns the fd. The application asks for a
 * ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
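/*
 * Minimal userspace sketch (illustrative; there is traditionally no libc
 * wrapper, so liburing and applications invoke the raw syscall):
 *
 *	static int sys_io_uring_setup(unsigned entries,
 *				      struct io_uring_params *p)
 *	{
 *		return (int) syscall(__NR_io_uring_setup, entries, p);
 *	}
 *
 *	struct io_uring_params p = { };
 *	int ring_fd = sys_io_uring_setup(64, &p);
 */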
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
			   unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
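
/*
 * Userspace sketch (illustrative, using a raw io_uring_register(2) syscall
 * wrapper): probing which opcodes the running kernel supports:
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  256 * sizeof(struct io_uring_probe_op));
 *	io_uring_register(ring_fd, IORING_REGISTER_PROBE, probe, 256);
 *	if (probe->ops_len > IORING_OP_SENDMSG &&
 *	    (probe->ops[IORING_OP_SENDMSG].flags & IO_URING_OP_SUPPORTED))
 *		... IORING_OP_SENDMSG is available ...
 */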

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}

static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
					   void __user *arg, unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}
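
/*
 * Userspace sketch (illustrative, hypothetical opcode choices): a ring
 * created with IORING_SETUP_R_DISABLED can be locked down to a small set of
 * SQE opcodes before being enabled:
 *
 *	struct io_uring_restriction res[2] = { };
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[1].sqe_op = IORING_OP_WRITEV;
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);
 *	io_uring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */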

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
		ctx->submitter_task = get_task_struct(current);

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}

static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}
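
/*
 * Userspace sketch (illustrative): pinning this task's io-wq workers to
 * CPUs 0-3 via io_uring_register(2):
 *
 *	cpu_set_t mask;
 *	int cpu;
 *
 *	CPU_ZERO(&mask);
 *	for (cpu = 0; cpu < 4; cpu++)
 *		CPU_SET(cpu, &mask);
 *	io_uring_register(ring_fd, IORING_REGISTER_IOWQ_AFF, &mask,
 *			  sizeof(mask));
 */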

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}

static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
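
/*
 * Userspace sketch (illustrative, hypothetical limits): capping io-wq to 8
 * bounded and 4 unbounded workers; the kernel writes the previous limits
 * back into the array (a zero entry leaves that limit unchanged):
 *
 *	unsigned int counts[2] = { 8, 4 };
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_IOWQ_MAX_WORKERS,
 *			  counts, 2);
 */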

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (!io_is_uring_fops(f.file))
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work_ctx(ctx);

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	fdput(f);
	return ret;
}

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	io_uring_optable_init();

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);