// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

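/*
 * Buffer group lookup helper: group IDs below BGID_ARRAY live in the
 * ctx->io_bl array, higher group IDs are stored in the io_bl_xa xarray.
 */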
static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
						   struct io_buffer_list *bl,
						   unsigned int bgid)
{
	if (bl && bgid < BGID_ARRAY)
		return &bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

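/*
 * Tracks a kernel allocated region backing a mmap'ed provided buffer ring.
 * Entries stay on ctx->io_buf_list so the memory can be reused by a later
 * registration, and are freed at ring release time.
 */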
struct io_buf_free {
	struct hlist_node		list;
	void				*mem;
	size_t				size;
	int				inuse;
};

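/* Locked variant of the group lookup, callers must hold ->uring_lock */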
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
}

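/*
 * Publish a buffer group list so lookups can find it. Low group IDs already
 * have a slot in the ctx->io_bl array, higher ones go into the xarray.
 */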
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	smp_store_release(&bl->is_ready, 1);

	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

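/*
 * Return a selected legacy provided buffer to its group so it can be handed
 * out again, provided no IO has been done to it yet.
 */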
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return;
}

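/*
 * Drop the buffer attached to a request, putting it back on the appropriate
 * list, and return the CQE flags describing the buffer that was used.
 */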
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

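/* Hand out the first available legacy provided buffer from the group, if any */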
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

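/*
 * Pick the buffer at the current head of a ring mapped group. The head is
 * only advanced (consuming the buffer) right away for the unlocked or
 * non-pollable case, see the comment below.
 */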
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	/* mmaped buffers are always contig */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

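/*
 * Select a buffer for the request from its buffer group (req->buf_index),
 * using the ring mapped path if the group was registered that way and the
 * legacy provided buffer path otherwise.
 */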
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

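/* Lazily allocate the flat array used for the first BGID_ARRAY group IDs */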
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	int i;

	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
	if (!bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&bl[i].buf_list);
		bl[i].bgid = i;
	}

	smp_store_release(&ctx->io_bl, bl);
	return 0;
}

/*
 * Mark the given mapped range as free for reuse
 */
static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		if (bl->buf_ring == ibf->mem) {
			ibf->inuse = 0;
			return;
		}
	}

	/* can't happen... */
	WARN_ON_ONCE(1);
}

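/*
 * Tear down up to 'nbufs' buffers from a group. For ring mapped groups the
 * backing pages or kernel memory are released and the number of entries left
 * in the ring is returned; for legacy groups, buffers are moved back to the
 * ctx cache and the number removed is returned.
 */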
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_mapped) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			/*
			 * io_kbuf_mmap_list_free() will free the page(s) at
			 * ->release() time.
			 */
			io_kbuf_mark_free(ctx, bl);
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

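/* Called at ring teardown: remove all buffer groups and free the legacy buffer cache pages */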
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

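/*
 * Prep for IORING_OP_REMOVE_BUFFERS: the number of buffers to remove is
 * passed in sqe->fd, the buffer group ID in sqe->buf_group.
 */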
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

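/* Remove up to p->nbufs legacy buffers from the group; not valid for mapped rings */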
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_mapped)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

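/*
 * Prep for IORING_OP_PROVIDE_BUFFERS: validate the user address range and
 * make sure the resulting buffer IDs stay within what a CQE can report.
 */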
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

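/*
 * Refill ctx->io_buffers_cache, first by splicing over entries that completed
 * out-of-line, and failing that by allocating a new page worth of io_buffer
 * entries.
 */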
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

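/* Add pbuf->nbufs buffers to a legacy group, pulling io_buffer entries from the cache */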
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

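/*
 * IORING_OP_PROVIDE_BUFFERS: look up (or create) the buffer group and add
 * the described buffers to it.
 */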
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout. Also can't
			 * be a lower indexed array group, as adding one
			 * where lookup failed cannot happen.
			 */
			if (p->bgid >= BGID_ARRAY)
				kfree_rcu(bl, rcu);
			else
				WARN_ON_ONCE(1);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

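/*
 * Pin the application supplied memory for a registered buffer ring, used
 * when the ring memory is provided by the application rather than being
 * allocated by the kernel and mmap'ed (no IOU_PBUF_RING_MMAP flag).
 */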
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int i, nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
	 * which then need to be mapped. We could support that, but it'd
	 * complicate the code and slow down the common cases quite a bit.
	 * So just error out, returning -EINVAL just like we did on kernels
	 * that didn't support mapped buffer rings.
	 */
	for (i = 0; i < nr_pages; i++)
		if (PageHighMem(pages[i]))
			goto error_unpin;

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
		goto error_unpin;
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	for (i = 0; i < nr_pages; i++)
		unpin_user_page(pages[i]);
	kvfree(pages);
	return -EINVAL;
}

/*
 * See if we have a suitable region that we can reuse, rather than allocate
 * both a new io_buf_free and mem region again. We leave it on the list as
 * even a reused entry will need freeing at ring release.
 */
static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
						    size_t ring_size)
{
	struct io_buf_free *ibf, *best = NULL;
	size_t best_dist;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		size_t dist;

		if (ibf->inuse || ibf->size < ring_size)
			continue;
		dist = ibf->size - ring_size;
		if (!best || dist < best_dist) {
			best = ibf;
			if (!dist)
				break;
			best_dist = dist;
		}
	}

	return best;
}

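/*
 * Allocate (or reuse) kernel memory for a buffer ring that the application
 * will mmap, as used with the IOU_PBUF_RING_MMAP registration flag.
 */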
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	/* Reuse existing entry, if we can */
	ibf = io_lookup_buf_free_entry(ctx, ring_size);
	if (!ibf) {
		ptr = io_mem_alloc(ring_size);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		/* Allocate and store deferred free entry */
		ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
		if (!ibf) {
			io_mem_free(ptr);
			return -ENOMEM;
		}
		ibf->mem = ptr;
		ibf->size = ring_size;
		hlist_add_head(&ibf->list, &ctx->io_buf_list);
	}
	ibf->inuse = 1;
	bl->buf_ring = ibf->mem;
	bl->is_mapped = 1;
	bl->is_mmap = 1;
	return 0;
}

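/* Register a ring mapped provided buffer group (IORING_REGISTER_PBUF_RING) */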
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		int ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

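/* Unregister a previously registered ring mapped provided buffer group */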
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree_rcu(bl, rcu);
	}
	return 0;
}

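/*
 * Return the kernel address backing a mmap'ed buffer ring for the given
 * group ID. Used by the mmap side of the lookup; the acquire load of
 * ->is_ready pairs with the release in io_buffer_add_list().
 */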
void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
{
	struct io_buffer_list *bl;

	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);

	if (!bl || !bl->is_mmap)
		return NULL;
	/*
	 * Ensure the list is fully setup. Only strictly needed for RCU lookup
	 * via mmap, and in that case only for the array indexed groups. For
	 * the xarray lookups, it's either visible and ready, or not at all.
	 */
	if (!smp_load_acquire(&bl->is_ready))
		return NULL;

	return bl->buf_ring;
}

/*
 * Called at or after ->release(), free the mmap'ed buffers that we used
 * for memory mapped provided buffer rings.
 */
void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
{
	struct io_buf_free *ibf;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
		hlist_del(&ibf->list);
		io_mem_free(ibf->mem);
		kfree(ibf);
	}
}