Lines matching refs: bl
41 struct io_buffer_list *bl, unsigned int bgid) in io_buffer_add_list() argument
43 bl->bgid = bgid; in io_buffer_add_list()
47 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); in io_buffer_add_list()
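io_buffer_add_list() stamps the group ID on the list and publishes it in the ctx->io_bl_xa xarray under that ID, with xa_err() folding xa_store()'s result into an errno. Below is a minimal userspace sketch of the same registration step, assuming a fixed-size table in place of a real xarray; every ex_* name and EX_MAX_BGID is illustrative, not from the source, and the later sketches in this listing reuse these types.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define EX_MAX_BGID 64                  /* illustrative bound, not from the source */

struct ex_buf {                         /* stand-in for struct io_buffer */
        struct ex_buf *next;
        void *addr;
        uint32_t len;
};

struct ex_desc {                        /* stand-in for one ring entry */
        void *addr;
        uint32_t len;
        uint16_t bid;
};

struct ex_group {                       /* stand-in for struct io_buffer_list */
        unsigned int bgid;
        unsigned int nr_pages;          /* non-zero => ring mapped */
        uint16_t head, tail, mask;
        struct ex_desc *ring;           /* ring entries when mapped */
        struct ex_buf *list;            /* legacy free list otherwise */
};

static struct ex_group *ex_table[EX_MAX_BGID];  /* stand-in for ctx->io_bl_xa */

/* like io_buffer_add_list(): tag the group with its ID, then publish it
 * under that ID (xa_store() would likewise replace an existing entry) */
static int ex_add_group(struct ex_group *g, unsigned int bgid)
{
        if (bgid >= EX_MAX_BGID)
                return -EINVAL;
        g->bgid = bgid;
        ex_table[bgid] = g;
        return 0;
}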
53 struct io_buffer_list *bl; in io_kbuf_recycle_legacy() local
68 bl = io_buffer_get_list(ctx, buf->bgid); in io_kbuf_recycle_legacy()
69 list_add(&buf->list, &bl->buf_list); in io_kbuf_recycle_legacy()
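io_kbuf_recycle_legacy() looks the group up by buf->bgid and list_add()s the consumed buffer back at the head of buf_list, so the most recently returned buffer is the next one handed out. Continuing the hypothetical ex_* sketch:

/* like io_kbuf_recycle_legacy(): push the buffer back on the front of
 * its group's free list (LIFO reuse) */
static void ex_recycle(struct ex_group *g, struct ex_buf *buf)
{
        buf->next = g->list;
        g->list = buf;
}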
111 struct io_buffer_list *bl) in io_provided_buffer_select() argument
113 if (!list_empty(&bl->buf_list)) { in io_provided_buffer_select()
116 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_provided_buffer_select()
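Selection from a legacy group is the mirror image: if buf_list is non-empty, list_first_entry() peels the head buffer off. In the sketch:

/* like io_provided_buffer_select(): hand out the head buffer, or NULL
 * when the group is empty (the kernel then fails the request) */
static struct ex_buf *ex_list_select(struct ex_group *g)
{
        struct ex_buf *buf = g->list;

        if (buf)                        /* !list_empty() */
                g->list = buf->next;
        return buf;
}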
129 struct io_buffer_list *bl, in io_ring_buffer_select() argument
132 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffer_select()
134 __u16 head = bl->head; in io_ring_buffer_select()
139 head &= bl->mask; in io_ring_buffer_select()
145 buf = page_address(bl->buf_pages[index]); in io_ring_buffer_select()
151 req->buf_list = bl; in io_ring_buffer_select()
166 bl->head++; in io_ring_buffer_select()
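io_ring_buffer_select() consumes from the shared ring instead: the kernel-private head is compared against the user-published tail, head & mask picks the entry, req->buf_list records the group for later recycling, and bl->head is bumped once the buffer is committed. A sketch of that indexing, assuming a power-of-two ring and ignoring the acquire barrier the real code needs when reading the shared tail:

/* like io_ring_buffer_select(): empty when head == tail; otherwise the
 * entry lives at head & mask, and head advances past it on commit */
static struct ex_desc *ex_ring_select(struct ex_group *g)
{
        struct ex_desc *buf;

        if (g->head == g->tail)
                return NULL;
        buf = &g->ring[g->head & g->mask];
        g->head++;
        return buf;
}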
175 struct io_buffer_list *bl; in io_buffer_select() local
180 bl = io_buffer_get_list(ctx, req->buf_index); in io_buffer_select()
181 if (likely(bl)) { in io_buffer_select()
182 if (bl->buf_nr_pages) in io_buffer_select()
183 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
185 ret = io_provided_buffer_select(req, len, bl); in io_buffer_select()
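io_buffer_select() is the common entry point: it resolves req->buf_index to a group via io_buffer_get_list() and dispatches on bl->buf_nr_pages, which is non-zero only for ring-mapped groups. The same two-way dispatch in the sketch:

/* like io_buffer_select(): pinned ring pages mean the ring path,
 * anything else falls back to the legacy provided-buffer list */
static void *ex_select(unsigned int bgid)
{
        struct ex_group *g = ex_table[bgid];    /* io_buffer_get_list() */

        if (!g)
                return NULL;
        if (g->nr_pages)
                return ex_ring_select(g);
        return ex_list_select(g);
}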
209 struct io_buffer_list *bl, unsigned nbufs) in __io_remove_buffers() argument
217 if (bl->buf_nr_pages) { in __io_remove_buffers()
220 i = bl->buf_ring->tail - bl->head; in __io_remove_buffers()
221 for (j = 0; j < bl->buf_nr_pages; j++) in __io_remove_buffers()
222 unpin_user_page(bl->buf_pages[j]); in __io_remove_buffers()
223 kvfree(bl->buf_pages); in __io_remove_buffers()
224 bl->buf_pages = NULL; in __io_remove_buffers()
225 bl->buf_nr_pages = 0; in __io_remove_buffers()
227 INIT_LIST_HEAD(&bl->buf_list); in __io_remove_buffers()
232 while (!list_empty(&bl->buf_list)) { in __io_remove_buffers()
235 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); in __io_remove_buffers()
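__io_remove_buffers() is the shared teardown: for a ring-mapped group it computes tail - head (buffers still pending in the ring), unpins every pinned page, kvfree()s the page array, and re-inits buf_list so the group reads as empty; for a legacy group it walks buf_list freeing up to nbufs entries, with callers passing -1U to mean "all of them". A sketch, with free() standing in for both page unpinning and the kernel's io_buffers cache:

/* like __io_remove_buffers(): tear down a mapped ring, or detach and
 * free at most nbufs legacy buffers, returning the count affected */
static unsigned int ex_remove_buffers(struct ex_group *g, unsigned int nbufs)
{
        unsigned int freed = 0;

        if (g->nr_pages) {
                free(g->ring);                  /* unpin + kvfree() in the kernel */
                g->ring = NULL;
                g->nr_pages = 0;
                return g->tail - g->head;       /* buffers still pending */
        }
        while (g->list && freed < nbufs) {
                struct ex_buf *nxt = g->list;

                g->list = nxt->next;
                free(nxt);
                freed++;
        }
        return freed;
}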
248 struct io_buffer_list *bl; in io_destroy_buffers() local
258 xa_for_each(&ctx->io_bl_xa, index, bl) { in io_destroy_buffers()
259 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_buffers()
260 __io_remove_buffers(ctx, bl, -1U); in io_destroy_buffers()
261 kfree(bl); in io_destroy_buffers()
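io_destroy_buffers() runs at ring teardown: xa_for_each() visits every remaining group, xa_erase()s it, drains it with __io_remove_buffers(ctx, bl, -1U), and kfree()s the group itself. The equivalent sweep over the sketch's table:

/* like io_destroy_buffers(): unpublish each group, drain everything it
 * still holds, then free the group structure */
static void ex_destroy_all(void)
{
        for (unsigned int i = 0; i < EX_MAX_BGID; i++) {
                struct ex_group *g = ex_table[i];

                if (!g)
                        continue;
                ex_table[i] = NULL;             /* xa_erase() */
                ex_remove_buffers(g, -1U);
                free(g);
        }
}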
296 struct io_buffer_list *bl; in io_remove_buffers() local
302 bl = io_buffer_get_list(ctx, p->bgid); in io_remove_buffers()
303 if (bl) { in io_remove_buffers()
306 if (!bl->buf_nr_pages) in io_remove_buffers()
307 ret = __io_remove_buffers(ctx, bl, p->nbufs); in io_remove_buffers()
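io_remove_buffers() is the user-visible removal path, and it only touches legacy groups: a group with buf_nr_pages set belongs to a mapped ring and must be torn down through unregister instead. In the sketch:

/* like io_remove_buffers(): refuse ring-mapped groups, otherwise free
 * up to nbufs legacy buffers from the group */
static int ex_user_remove(unsigned int bgid, unsigned int nbufs)
{
        struct ex_group *g = ex_table[bgid];

        if (!g)
                return -ENOENT;
        if (g->nr_pages)
                return -EINVAL;
        return (int)ex_remove_buffers(g, nbufs);
}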
399 struct io_buffer_list *bl) in io_add_buffers() argument
411 list_move_tail(&buf->list, &bl->buf_list); in io_add_buffers()
428 struct io_buffer_list *bl; in io_provide_buffers() local
439 bl = io_buffer_get_list(ctx, p->bgid); in io_provide_buffers()
440 if (unlikely(!bl)) { in io_provide_buffers()
441 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_provide_buffers()
442 if (!bl) { in io_provide_buffers()
446 INIT_LIST_HEAD(&bl->buf_list); in io_provide_buffers()
447 ret = io_buffer_add_list(ctx, bl, p->bgid); in io_provide_buffers()
449 kfree(bl); in io_provide_buffers()
454 if (bl->buf_nr_pages) { in io_provide_buffers()
459 ret = io_add_buffers(ctx, p, bl); in io_provide_buffers()
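io_provide_buffers() lazily creates the group on first use: a missing list is kzalloc()ed, published with io_buffer_add_list() (and freed again if that fails), a ring-mapped group is rejected, and io_add_buffers() then list_move_tail()s each new buffer onto buf_list so fresh buffers queue behind existing ones. A sketch of the get-or-create flow; note that for simplicity it pushes at the head where the kernel appends at the tail:

/* like io_provide_buffers() + io_add_buffers(): create and publish the
 * group on demand, refuse mapped rings, then link in the new buffers */
static int ex_provide(unsigned int bgid, struct ex_buf *bufs[], unsigned int n)
{
        struct ex_group *g = ex_table[bgid];    /* io_buffer_get_list() */

        if (!g) {
                g = calloc(1, sizeof(*g));      /* kzalloc(GFP_KERNEL_ACCOUNT) */
                if (!g)
                        return -ENOMEM;
                if (ex_add_group(g, bgid)) {
                        free(g);
                        return -EINVAL;
                }
        }
        if (g->nr_pages)                        /* can't mix with a mapped ring */
                return -EINVAL;
        for (unsigned int i = 0; i < n; i++) {
                bufs[i]->next = g->list;
                g->list = bufs[i];
        }
        return 0;
}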
474 struct io_buffer_list *bl, *free_bl = NULL; in io_register_pbuf_ring() local
500 bl = io_buffer_get_list(ctx, reg.bgid); in io_register_pbuf_ring()
501 if (bl) { in io_register_pbuf_ring()
503 if (bl->buf_nr_pages || !list_empty(&bl->buf_list)) in io_register_pbuf_ring()
506 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); in io_register_pbuf_ring()
507 if (!bl) in io_register_pbuf_ring()
520 bl->buf_pages = pages; in io_register_pbuf_ring()
521 bl->buf_nr_pages = nr_pages; in io_register_pbuf_ring()
522 bl->nr_entries = reg.ring_entries; in io_register_pbuf_ring()
523 bl->buf_ring = br; in io_register_pbuf_ring()
524 bl->mask = reg.ring_entries - 1; in io_register_pbuf_ring()
525 io_buffer_add_list(ctx, bl, reg.bgid); in io_register_pbuf_ring()
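io_register_pbuf_ring() requires the group to be unused (no pinned pages, empty buf_list), pins the user-supplied ring pages, and derives mask as ring_entries - 1, which is why ring_entries must be a power of two. A sketch with a plain allocation standing in for page pinning:

/* like io_register_pbuf_ring(): validate the entry count, reuse an
 * empty group or allocate one, and set up the masked ring */
static int ex_register_ring(unsigned int bgid, uint16_t entries)
{
        struct ex_group *g = ex_table[bgid];
        struct ex_desc *ring;

        /* entries must be a power of two so head & mask indexing works */
        if (!entries || (entries & (entries - 1)))
                return -EINVAL;
        if (g && (g->nr_pages || g->list))
                return -EEXIST;                 /* group already in use */

        ring = calloc(entries, sizeof(*ring));  /* pin_user_pages() in the kernel */
        if (!ring)
                return -ENOMEM;
        if (!g) {
                g = calloc(1, sizeof(*g));
                if (!g || ex_add_group(g, bgid)) {
                        free(ring);
                        free(g);
                        return -ENOMEM;
                }
        }
        g->ring = ring;
        g->nr_pages = 1;                        /* marks the group as mapped */
        g->mask = entries - 1;
        return 0;
}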
532 struct io_buffer_list *bl; in io_unregister_pbuf_ring() local
539 bl = io_buffer_get_list(ctx, reg.bgid); in io_unregister_pbuf_ring()
540 if (!bl) in io_unregister_pbuf_ring()
542 if (!bl->buf_nr_pages) in io_unregister_pbuf_ring()
545 __io_remove_buffers(ctx, bl, -1U); in io_unregister_pbuf_ring()
546 if (bl->bgid >= BGID_ARRAY) { in io_unregister_pbuf_ring()
547 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_unregister_pbuf_ring()
548 kfree(bl); in io_unregister_pbuf_ring()
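io_unregister_pbuf_ring() is the inverse: it refuses legacy groups (no buf_nr_pages), drains the ring via __io_remove_buffers(), and erases and frees the group, though the kernel only does the erase/free for dynamically allocated group IDs (bgid >= BGID_ARRAY). In the sketch, which has no static low-bgid array:

/* like io_unregister_pbuf_ring(): only mapped rings can be unregistered;
 * drain the ring, then unpublish and free the group */
static int ex_unregister_ring(unsigned int bgid)
{
        struct ex_group *g = ex_table[bgid];

        if (!g)
                return -ENOENT;
        if (!g->nr_pages)       /* legacy groups use the remove path instead */
                return -EINVAL;
        ex_remove_buffers(g, -1U);
        ex_table[bgid] = NULL;  /* xa_erase(), unconditional in this sketch */
        free(g);
        return 0;
}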