// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)
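
/*
 * Layout of ->poll_refs: bit 31 flags the request as cancelled, bit 30 asks
 * the current owner to retry the event check, and the low 30 bits are the
 * reference count proper. Whoever bumps the count while the masked part is
 * zero acquires ownership (see io_poll_get_ownership()).
 */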

/*
 * We usually have 1-2 refs taken; 128 is more than enough and we want to
 * maximise the margin between this amount and the point where it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

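/*
 * The wait queue entry's ->private field carries the io_kiocb pointer with
 * IO_WQE_F_DOUBLE stashed in bit 0 to mark entries belonging to the second
 * (double) poll entry; the pointer is assumed to be at least 2-byte aligned,
 * leaving that bit free.
 */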
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope of
	 * grabbing ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump the count to acquire ownership. Modifying a request
 * while not owning it is disallowed; that prevents races when enqueueing
 * task_work and between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

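/*
 * Poll requests are hashed by their CQE user_data so that cancellation can
 * find them; ->cancel_table buckets are protected by per-bucket spinlocks,
 * while ->cancel_table_locked relies on ->uring_lock instead.
 */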
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, ts);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

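/*
 * Return codes of io_poll_check_events(): DONE terminates the request with
 * the mask in cqe.res, NO_ACTION means nothing further to do (a spurious
 * wakeup or a multishot CQE was posted), REMOVE_POLL_USE_RES removes a
 * multishot poll but keeps the already-stored result, REISSUE resubmits the
 * request, and REQUEUE re-arms the task_work.
 */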
enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,
};

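/* stash the wakeup mask in cqe.res and punt the rest to task_work */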
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * is required, i.e. a spurious wakeup was handled or a multishot CQE was
 * served. IOU_POLL_DONE when it's done with the request, with the mask
 * stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove
 * the multishot poll and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
	int v;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only the events of the first wakeup;
			 * all others would be lost. Redo vfs_poll() to get the
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_fill_cqe_req_aux(req, ts->locked, mask,
						 IORING_CQE_F_MORE)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, ts);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			else if (ret == IOU_REQUEUE)
				return IOU_POLL_REQUEUE;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
					IO_POLL_REF_MASK);

	return IOU_POLL_NO_ACTION;
}

void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret;

	ret = io_poll_check_events(req, ts);
	if (ret == IOU_POLL_NO_ACTION) {
		return;
	} else if (ret == IOU_POLL_REQUEUE) {
		__io_poll_execute(req, 0);
		return;
	}
	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, ts);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(req, ts);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(req, ts);
	} else {
		io_tw_lock(req->ctx, ts);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, ts);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(req, ts);
		else
			io_req_defer_failed(req, ret);
	}
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

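/*
 * Events that get requested for (nearly) every apoll; exclude them when
 * checking in io_poll_wake() whether a wakeup actually matches the events
 * the caller asked for.
 */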
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already
	 * holds ownership over it, we have to tear down the request as
	 * best we can. That means immediately removing the request from
	 * its waitqueue and preventing all further accesses to the
	 * waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

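/*
 * Waitqueue callback; invoked with the waitqueue head lock held, potentially
 * from hard-irq context, so it must not sleep.
 */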
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when the poll is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

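/*
 * Inline completion is only safe if we own the request: either we kept the
 * ref taken at arming time (the io-wq path) or we can grab ownership now.
 */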
static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by virtue of running
	 * in the same task. When it's io-wq, take ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * the poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll triggers repeatedly and the
 * subsequent issue keeps failing. Rather than fail these immediately, allow
 * a certain amount of retries before we give up. Given that this condition
 * should _rarely_ trigger even once, we should be fine with a larger value.
 */
#define APOLL_MAX_RETRY		128

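/*
 * Pick an async_poll: reuse the one from a previous arming if the request
 * was already polled, take one from the ctx cache when we hold the ring
 * lock, or fall back to a GFP_ATOMIC allocation.
 */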
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
		apoll->poll.retries = APOLL_MAX_RETRY;
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

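/*
 * On success, the matching request is returned with its hash bucket lock
 * still held and *out_bucket set; the caller is responsible for dropping
 * the lock. On failure, no lock is held and *out_bucket is NULL.
 */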
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd)) {
				*out_bucket = hb;
				return req;
			}
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

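/*
 * Take ownership and tear down the poll entries. The caller is expected to
 * hold whatever lock protects the hash the request sits in (the bucket
 * spinlock or ->uring_lock), since hash_del() is done here.
 */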
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

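/*
 * sqe->poll32_events carries the poll mask as two 16-bit halves; on
 * big-endian kernels the halves end up swapped relative to what userspace
 * wrote, hence the swahw32(). EPOLLONESHOT/EPOLLET are implied unless the
 * corresponding multishot/level-triggered flags were passed.
 */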
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

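/*
 * Update or cancel an existing poll request: look it up in both cancel
 * tables, disarm it, then either re-arm it with the new parameters or
 * complete it with -ECANCELED.
 */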
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	struct io_tw_state ts = { .locked = true };

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask bits, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	io_req_task_complete(preq, &ts);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}