Searched refs:io_ring_ctx (Results 1 – 25 of 28) sorted by relevance

/linux-6.6.21/io_uring/
rsrc.h
29 typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
32 struct io_ring_ctx *ctx;
43 struct io_ring_ctx *ctx;
61 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
62 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
69 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
70 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
71 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
73 void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
74 int io_sqe_files_unregister(struct io_ring_ctx *ctx);
[all …]
io_uring.h
48 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
50 int io_run_task_work_sig(struct io_ring_ctx *ctx);
53 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
55 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
74 struct io_ring_ctx *ctx);
80 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
81 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
82 void __io_submit_flush_completions(struct io_ring_ctx *ctx);
91 bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
100 static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx) in io_lockdep_assert_cq_locked()
[all …]
tctx.h
6 struct io_ring_ctx *ctx;
10 struct io_ring_ctx *ctx);
12 int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
13 int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx);
17 int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
19 int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
25 static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx) in io_uring_add_tctx_node()
io_uring.c
145 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
182 struct io_ring_ctx *ctx = file->private_data; in io_uring_get_socket()
191 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) in io_submit_flush_completions()
198 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) in __io_cqring_events()
203 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx) in __io_cqring_events_user()
234 struct io_ring_ctx *ctx = head->ctx; in io_match_task_safe()
252 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_req_add_to_cache()
259 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); in io_ring_ctx_ref_free()
266 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, in io_fallback_req_func()
297 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) in io_ring_ctx_alloc()
[all …]
rsrc.c
26 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
27 static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
28 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
62 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) in io_unaccount_mem()
71 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) in io_account_mem()
87 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, in io_copy_iov()
136 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot) in io_buffer_unmap()
171 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node) in io_rsrc_node_destroy()
180 struct io_ring_ctx *ctx = node->ctx; in io_rsrc_node_ref_zero()
198 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) in io_rsrc_node_alloc()
[all …]
sqpoll.h
23 int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p);
24 void io_sq_thread_finish(struct io_ring_ctx *ctx);
29 void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
30 int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
kbuf.c
34 static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx, in __io_buffer_get_list()
51 static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx, in io_buffer_get_list()
59 static int io_buffer_add_list(struct io_ring_ctx *ctx, in io_buffer_add_list()
78 struct io_ring_ctx *ctx = req->ctx; in io_kbuf_recycle_legacy()
123 struct io_ring_ctx *ctx = req->ctx; in __io_put_kbuf()
201 struct io_ring_ctx *ctx = req->ctx; in io_buffer_select()
218 static __cold int io_init_bl_list(struct io_ring_ctx *ctx) in io_init_bl_list()
239 static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_kbuf_mark_free()
254 static int __io_remove_buffers(struct io_ring_ctx *ctx, in __io_remove_buffers()
304 void io_destroy_buffers(struct io_ring_ctx *ctx) in io_destroy_buffers()
[all …]
filetable.h
13 int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
15 int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset);
17 int io_register_file_alloc_range(struct io_ring_ctx *ctx,
69 static inline void io_reset_alloc_hint(struct io_ring_ctx *ctx) in io_reset_alloc_hint()
74 static inline void io_file_table_set_alloc_range(struct io_ring_ctx *ctx, in io_file_table_set_alloc_range()
msg_ring.c
36 static void io_double_unlock_ctx(struct io_ring_ctx *octx) in io_double_unlock_ctx()
41 static int io_double_lock_ctx(struct io_ring_ctx *octx, in io_double_lock_ctx()
69 static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx) in io_msg_need_remote()
78 struct io_ring_ctx *ctx = req->file->private_data; in io_msg_exec_remote()
96 struct io_ring_ctx *target_ctx = req->file->private_data; in io_msg_tw_complete()
128 struct io_ring_ctx *target_ctx = req->file->private_data; in io_msg_ring_data()
163 struct io_ring_ctx *ctx = req->ctx; in io_msg_grab_file()
180 struct io_ring_ctx *target_ctx = req->file->private_data; in io_msg_install_complete()
225 struct io_ring_ctx *target_ctx = req->file->private_data; in io_msg_send_fd()
227 struct io_ring_ctx *ctx = req->ctx; in io_msg_send_fd()
filetable.c
16 static int io_file_bitmap_get(struct io_ring_ctx *ctx) in io_file_bitmap_get()
63 static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, in io_install_fixed_file()
99 int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file, in __io_fixed_fd_install()
126 struct io_ring_ctx *ctx = req->ctx; in io_fixed_fd_install()
138 int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset) in io_fixed_fd_remove()
163 int io_register_file_alloc_range(struct io_ring_ctx *ctx, in io_register_file_alloc_range()
kbuf.h
46 void io_destroy_buffers(struct io_ring_ctx *ctx);
54 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
55 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
57 void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
63 void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
timeout.h
24 __cold void io_flush_timeouts(struct io_ring_ctx *ctx);
26 int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
27 __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
timeout.c
72 struct io_ring_ctx *ctx = req->ctx; in io_timeout_complete()
111 __cold void io_flush_timeouts(struct io_ring_ctx *ctx) in io_flush_timeouts()
205 struct io_ring_ctx *ctx = req->ctx; in io_disarm_next()
242 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
260 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx, in io_timeout_extract()
287 int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) in io_timeout_cancel()
333 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
374 static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data, in io_linked_timeout_update()
402 static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data, in io_timeout_update()
466 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
[all …]
sqpoll.c
79 struct io_ring_ctx *ctx; in io_sqd_update_thread_idle()
87 void io_sq_thread_finish(struct io_ring_ctx *ctx) in io_sq_thread_finish()
104 struct io_ring_ctx *ctx_attach; in io_attach_sq_data()
167 static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) in __io_sq_thread()
225 struct io_ring_ctx *ctx; in io_sq_thread()
326 void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) in io_sqpoll_wait_sq()
343 __cold int io_sq_offload_create(struct io_ring_ctx *ctx, in io_sq_offload_create()
433 __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, in io_sqpoll_wq_cpu_affinity()
tctx.c
15 static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, in io_init_wq_offload()
61 struct io_ring_ctx *ctx) in io_uring_alloc_task_context()
94 int __io_uring_add_tctx_node(struct io_ring_ctx *ctx) in __io_uring_add_tctx_node()
136 int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx) in __io_uring_add_tctx_node_from_submit()
253 int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg, in io_ringfd_register()
312 int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg, in io_ringfd_unregister()
cancel.h
6 struct io_ring_ctx *ctx;
23 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
poll.h
34 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
37 bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
notif.h
22 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
42 struct io_ring_ctx *ctx = notif->ctx; in io_notif_account_mem()
poll.c
130 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_poll_req_delete()
153 struct io_ring_ctx *ctx = req->ctx; in io_poll_tw_hash_eject()
577 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
673 struct io_ring_ctx *ctx = req->ctx; in io_req_alloc_apoll()
778 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, in io_poll_remove_all()
789 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, in io_poll_find()
818 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, in io_poll_file_find()
855 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, in __io_poll_cancel()
874 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, in io_poll_cancel()
979 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
cancel.c
105 struct io_ring_ctx *ctx = cd->ctx; in io_try_cancel()
161 struct io_ring_ctx *ctx = cd->ctx; in __io_async_cancel()
241 struct io_ring_ctx *ctx = cd->ctx; in __io_sync_cancel()
257 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg) in io_sync_cancel()
notif.c
15 struct io_ring_ctx *ctx = notif->ctx; in io_notif_complete_tw_ext()
65 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx) in io_alloc_notif()
openclose.h
3 int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
uring_cmd.c
98 struct io_ring_ctx *ctx = req->ctx; in io_uring_cmd_prep()
116 struct io_ring_ctx *ctx = req->ctx; in io_uring_cmd()
rw.c
88 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
191 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
673 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
995 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) in io_do_iopoll()
/linux-6.6.21/include/linux/
io_uring_types.h
55 const struct io_ring_ctx *last;
196 struct io_ring_ctx { struct
557 struct io_ring_ctx *ctx;
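
Every match above follows the same convention: struct io_ring_ctx, the per-ring context defined in include/linux/io_uring_types.h (line 196 above), is passed as the first parameter of the io_uring submission, completion, buffer, file-table, poll, timeout and cancel helpers. The sketch below is only a hedged, userspace illustration of that "context as first argument" pattern; ring_ctx and post_cqe() are hypothetical names invented for the example, not the kernel's struct io_ring_ctx or any exported io_uring API.

/*
 * Illustrative sketch only: mimics the "context as first argument"
 * convention visible in the search results above. ring_ctx and
 * post_cqe() are made-up names, not kernel symbols.
 */
#include <stdio.h>
#include <stdlib.h>

struct cqe {
        unsigned long long user_data;   /* caller cookie, like io_uring's user_data */
        int res;                        /* completion result */
};

struct ring_ctx {                       /* stand-in for struct io_ring_ctx */
        struct cqe *cq;                 /* completion queue storage */
        unsigned int cq_entries;        /* ring capacity */
        unsigned int cq_tail;           /* next free slot */
};

/* Helpers take the context first, mirroring prototypes such as
 * io_post_aux_cqe(struct io_ring_ctx *ctx, ...) in the listing. */
static int post_cqe(struct ring_ctx *ctx, unsigned long long user_data, int res)
{
        if (ctx->cq_tail == ctx->cq_entries)
                return -1;              /* ring full */
        ctx->cq[ctx->cq_tail].user_data = user_data;
        ctx->cq[ctx->cq_tail].res = res;
        ctx->cq_tail++;
        return 0;
}

int main(void)
{
        struct ring_ctx ctx = {
                .cq = calloc(8, sizeof(struct cqe)),
                .cq_entries = 8,
        };

        if (!ctx.cq)
                return 1;
        post_cqe(&ctx, 0xdeadbeefULL, 0);
        printf("posted %u completion(s)\n", ctx.cq_tail);
        free(ctx.cq);
        return 0;
}

Keeping every helper keyed off one context object is why the reference count is so high: each file in the subsystem needs the ring state, so each takes a struct io_ring_ctx * rather than relying on globals.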
