// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

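/*
 * ->poll_refs layout: bit 31 marks the request as cancelled, bit 30 asks the
 * task_work handler to retry vfs_poll(), and the low 30 bits hold the
 * ownership reference count (see io_poll_get_ownership()).
 */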
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

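/*
 * Stashed in the low bit of wait_queue_entry.private to mark entries that
 * belong to the second (double) poll entry; wqe_to_req() masks it back out.
 */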
#define IO_WQE_F_DOUBLE		1

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Modifying a request while not
 * owning it is disallowed, which prevents races when enqueueing task_work and
 * between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

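/* hash the request by cqe.user_data into the per-bucket locked cancel table */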
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. the wakeup was spurious or a multishot CQE has been served.
 * IOU_POLL_DONE when it's done with the request, with the mask stored in
 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates that the multishot
 * poll should be removed and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return IOU_POLL_DONE;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;
		/*
		 * cqe.res contains only events of the first wake up
		 * and all others are lost. Redo vfs_poll() to get
		 * up to date state.
		 */
		if ((v & IO_POLL_REF_MASK) != 1)
			req->cqe.res = 0;
		if (v & IO_POLL_RETRY_FLAG) {
			req->cqe.res = 0;
			/*
			 * We won't find new events that came in between
			 * vfs_poll and the ref put unless we clear the flag
			 * in advance.
			 */
			atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
			v &= ~IO_POLL_RETRY_FLAG;
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;
		if (io_is_uring_fops(req->file))
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
					IO_POLL_REF_MASK);

	return IOU_POLL_NO_ACTION;
}

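/* task_work completion path for IORING_OP_POLL_ADD requests */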
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	if (ret == IOU_POLL_DONE) {
		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else if (ret == IOU_POLL_REISSUE) {
		io_poll_remove_entries(req);
		io_poll_tw_hash_eject(req, locked);
		io_req_task_submit(req, locked);
		return;
	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

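/* task_work path for apoll, i.e. poll armed internally on behalf of another request */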
static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
		io_req_complete_post(req);
	else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for poll that is armed on behalf of another
	 * request, and where the wakeup path could be on a different
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone else already
	 * holds ownership over it, we have to tear down the request as
	 * best we can. That means immediately removing the request from
	 * its waitqueue and preventing all further accesses to the
	 * waitqueue via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

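/*
 * Waitqueue wake callback: filter the mask against the events we're
 * interested in, try to take ownership and punt completion to task_work.
 */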
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

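/*
 * Queue proc invoked by vfs_poll() to add our wait entry to the file's
 * waitqueue(s); allocates a second io_poll if the file uses two queues.
 */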
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * the poll was woken up, queue up a tw; it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
		apoll = container_of(entry, struct async_poll, cache);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

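/*
 * Arm poll internally for a request that can't make progress right now, so
 * it is retried once the file signals readiness rather than being punted to
 * io-wq.
 */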
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

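/*
 * IORING_OP_POLL_REMOVE: find the target poll request by user_data, disarm
 * it, then either update its events/user_data and re-arm it or cancel it.
 */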
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}