
Searched refs: io_kiocb (Results 1 – 25 of 48), sorted by relevance


/linux-6.6.21/io_uring/
net.h
34 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
35 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
37 int io_sendmsg_prep_async(struct io_kiocb *req);
38 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
39 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
40 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
42 int io_send(struct io_kiocb *req, unsigned int issue_flags);
43 int io_send_prep_async(struct io_kiocb *req);
45 int io_recvmsg_prep_async(struct io_kiocb *req);
46 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
[all …]
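
The net.h prototypes above are the kernel-side prep/issue handlers behind the send/receive opcodes (io_sendmsg_prep(), io_send(), io_recvmsg_prep(), ...). As a hedged illustration of how they get exercised, here is a minimal userspace sketch using liburing (liburing itself and the already-connected socket are assumptions of the example, not part of this listing); error handling is trimmed.

/* Sketch: queue one send and one recv; the kernel services them through the
 * prep/issue handlers declared in net.h above (io_send() and friends).
 * Assumes `sockfd` is an already-connected socket (setup not shown). */
#include <liburing.h>
#include <string.h>
#include <stdio.h>

static int echo_once(int sockfd)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char out[] = "ping", in[64];

    if (io_uring_queue_init(8, &ring, 0) < 0)
        return -1;

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_send(sqe, sockfd, out, strlen(out), 0);

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_recv(sqe, sockfd, in, sizeof(in), 0);

    io_uring_submit(&ring);

    for (int i = 0; i < 2; i++) {
        if (io_uring_wait_cqe(&ring, &cqe) < 0)
            break;
        printf("cqe res=%d\n", cqe->res);   /* bytes transferred or -errno */
        io_uring_cqe_seen(&ring, cqe);
    }
    io_uring_queue_exit(&ring);
    return 0;
}
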
fs.h
3 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
5 void io_renameat_cleanup(struct io_kiocb *req);
7 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
8 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
9 void io_unlinkat_cleanup(struct io_kiocb *req);
11 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
13 void io_mkdirat_cleanup(struct io_kiocb *req);
15 int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
[all …]
timeout.h
4 struct io_kiocb *req;
11 struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
12 struct io_kiocb *link);
14 static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req) in io_disarm_linked_timeout()
16 struct io_kiocb *link = req->link; in io_disarm_linked_timeout()
29 void io_queue_linked_timeout(struct io_kiocb *req);
30 void io_disarm_next(struct io_kiocb *req);
32 int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
33 int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
34 int io_timeout(struct io_kiocb *req, unsigned int issue_flags);
[all …]
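
timeout.h covers both plain timeouts (io_timeout_prep()/io_timeout()) and linked timeouts that are disarmed when the request they are chained to completes first (io_link_timeout_prep(), io_disarm_linked_timeout()). Below is a hedged userspace sketch of the linked-timeout pattern using liburing; the read target fd and the 500 ms budget are arbitrary placeholders.

/* Sketch: attach a linked timeout to a read so it is cancelled after ~500 ms.
 * Kernel-side this exercises the timeout prep/issue and disarm paths declared
 * in timeout.h above. Error handling trimmed. */
#include <liburing.h>

static void read_with_deadline(struct io_uring *ring, int fd, char *buf, unsigned len)
{
    struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
    struct io_uring_sqe *sqe;

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_read(sqe, fd, buf, len, 0);
    /* IOSQE_IO_LINK chains the next SQE (the timeout) to this read. */
    io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

    sqe = io_uring_get_sqe(ring);
    /* The linked timeout only fires if the read is still pending when it
     * expires; otherwise the kernel disarms it. */
    io_uring_prep_link_timeout(sqe, &ts, 0);

    io_uring_submit(ring);
}
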
io_uring.h
49 void io_req_cqe_overflow(struct io_kiocb *req);
51 void io_req_defer_failed(struct io_kiocb *req, s32 res);
52 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
54 bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
59 struct file *io_file_get_normal(struct io_kiocb *req, int fd);
60 struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
63 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
65 bool io_alloc_async_data(struct io_kiocb *req);
66 void io_req_task_queue(struct io_kiocb *req);
67 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
[all …]
xattr.h
3 void io_xattr_cleanup(struct io_kiocb *req);
5 int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
6 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
8 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
9 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
11 int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
14 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
15 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
kbuf.h
44 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
48 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
49 int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
51 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
52 int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
59 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
61 void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
65 static inline void io_kbuf_recycle_ring(struct io_kiocb *req) in io_kbuf_recycle_ring()
92 static inline bool io_do_buffer_select(struct io_kiocb *req) in io_do_buffer_select()
99 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle()
[all …]
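
kbuf.h is the provided-buffer machinery: io_provide_buffers_prep()/io_provide_buffers() register a pool of buffers under a group id, io_buffer_select() hands one to a request flagged with IOSQE_BUFFER_SELECT, and __io_put_kbuf()/io_kbuf_recycle() return it. A hedged liburing sketch of the classic (non-ring) provided-buffer flow follows; the group id, buffer count and size are made up for illustration.

/* Sketch: legacy provided buffers driven from userspace. */
#include <liburing.h>

#define BGID   1       /* buffer group id (illustrative) */
#define NBUFS  8
#define BUF_SZ 4096

static void recv_with_provided_bufs(struct io_uring *ring, int sockfd)
{
    static char pool[NBUFS][BUF_SZ];
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;

    /* Hand the pool to the kernel (io_provide_buffers() path). */
    sqe = io_uring_get_sqe(ring);
    io_uring_prep_provide_buffers(sqe, pool, BUF_SZ, NBUFS, BGID, 0);
    io_uring_submit(ring);
    io_uring_wait_cqe(ring, &cqe);
    io_uring_cqe_seen(ring, cqe);

    /* A recv that lets the kernel pick the buffer (io_buffer_select()). */
    sqe = io_uring_get_sqe(ring);
    io_uring_prep_recv(sqe, sockfd, NULL, BUF_SZ, 0);
    io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
    sqe->buf_group = BGID;
    io_uring_submit(ring);

    io_uring_wait_cqe(ring, &cqe);
    if (cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
        unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
        /* data landed in pool[bid]; re-provide it later to recycle */
        (void)bid;
    }
    io_uring_cqe_seen(ring, cqe);
}
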
rw.h
18 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
19 int io_read(struct io_kiocb *req, unsigned int issue_flags);
20 int io_readv_prep_async(struct io_kiocb *req);
21 int io_write(struct io_kiocb *req, unsigned int issue_flags);
22 int io_writev_prep_async(struct io_kiocb *req);
23 void io_readv_writev_cleanup(struct io_kiocb *req);
24 void io_rw_fail(struct io_kiocb *req);
25 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts);
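
rw.h declares the read/write path: io_prep_rw() parses the SQE, io_read()/io_write() issue the I/O, and io_req_rw_complete() finishes it from task work. A minimal hedged sketch of the userspace side, assuming liburing and an ordinary readable file (the path is a placeholder):

/* Sketch: one async read, serviced kernel-side by the rw.h handlers above. */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char buf[4096];
    int fd = open("/etc/hostname", O_RDONLY);   /* placeholder path */

    if (fd < 0 || io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
    io_uring_submit(&ring);

    io_uring_wait_cqe(&ring, &cqe);
    printf("read returned %d\n", cqe->res);     /* bytes read or -errno */
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);
    return 0;
}
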
timeout.c
23 struct io_kiocb *head;
25 struct io_kiocb *prev;
38 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq()
46 static inline void io_put_req(struct io_kiocb *req) in io_put_req()
68 static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts) in io_timeout_complete()
92 static bool io_kill_timeout(struct io_kiocb *req, int status) in io_kill_timeout()
120 struct io_kiocb *req = cmd_to_io_kiocb(timeout); in io_flush_timeouts()
144 static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts) in io_req_tw_fail_links()
148 struct io_kiocb *nxt = link->link; in io_req_tw_fail_links()
160 static void io_fail_links(struct io_kiocb *req) in io_fail_links()
[all …]
openclose.h
6 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_openat(struct io_kiocb *req, unsigned int issue_flags);
8 void io_open_cleanup(struct io_kiocb *req);
10 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11 int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
13 int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
14 int io_close(struct io_kiocb *req, unsigned int issue_flags);
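
openclose.h pairs the async open handlers (io_openat_prep()/io_openat(), io_openat2()) with io_close_prep()/io_close(). A hedged sketch of opening and then closing a file entirely through the ring; the path is a placeholder and failure paths are trimmed.

/* Sketch: async openat followed by async close. */
#include <liburing.h>
#include <fcntl.h>

static int open_then_close(struct io_uring *ring, const char *path)
{
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    int fd;

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
    io_uring_submit(ring);

    io_uring_wait_cqe(ring, &cqe);
    fd = cqe->res;                 /* new fd on success, -errno otherwise */
    io_uring_cqe_seen(ring, cqe);
    if (fd < 0)
        return fd;

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_close(sqe, fd);
    io_uring_submit(ring);
    io_uring_wait_cqe(ring, &cqe);
    io_uring_cqe_seen(ring, cqe);
    return 0;
}
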
sync.h
3 int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags);
6 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_fsync(struct io_kiocb *req, unsigned int issue_flags);
9 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags);
10 int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
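
sync.h groups the flush-style opcodes: io_fsync(), io_sync_file_range() and io_fallocate(). A hedged sketch that links an fsync behind a write, so the fsync only runs once the write completes; the fd and data are placeholders and completions are reaped as in the earlier sketches.

/* Sketch: write then fsync, linked so the fsync is ordered after the write. */
#include <liburing.h>
#include <string.h>

static void write_and_flush(struct io_uring *ring, int fd, const char *data)
{
    struct io_uring_sqe *sqe;

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_write(sqe, fd, data, strlen(data), 0);
    io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);   /* order the fsync after the write */

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_fsync(sqe, fd, 0);              /* serviced by the fsync handlers above */

    io_uring_submit(ring);
}
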
poll.c
34 struct io_kiocb *req;
57 static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe) in wqe_to_req()
61 return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE); in wqe_to_req()
71 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath()
92 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership()
99 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled()
104 static struct io_poll *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double()
112 static struct io_poll *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single()
119 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert()
130 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_poll_req_delete()
[all …]
poll.h
27 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
28 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
30 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
31 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
36 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
42 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);
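
poll.h exposes the poll opcode itself (io_poll_add_prep()/io_poll_add(), io_poll_remove()) as well as io_arm_poll_handler(), which other opcodes can use internally to wait for readiness rather than blocking. A hedged one-shot poll sketch; POLLIN on a socket is just an example mask.

/* Sketch: one-shot readiness wait, handled kernel-side by io_poll_add(). */
#include <liburing.h>
#include <poll.h>
#include <stdio.h>

static void wait_readable(struct io_uring *ring, int fd)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    struct io_uring_cqe *cqe;

    io_uring_prep_poll_add(sqe, fd, POLLIN);
    io_uring_submit(ring);

    io_uring_wait_cqe(ring, &cqe);
    printf("poll result mask: 0x%x\n", cqe->res);  /* returned events or -errno */
    io_uring_cqe_seen(ring, cqe);
}
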
opdef.h
33 int (*issue)(struct io_kiocb *, unsigned int);
34 int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
43 int (*prep_async)(struct io_kiocb *);
44 void (*cleanup)(struct io_kiocb *);
45 void (*fail)(struct io_kiocb *);
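
opdef.h defines the per-opcode dispatch hooks: each opcode supplies prep() and issue() callbacks, with optional prep_async()/cleanup()/fail() handlers, and the prototypes collected in the headers above are what gets wired into those slots. From userspace, which opcodes ended up wired into the table can be queried through the probe interface; a hedged liburing sketch:

/* Sketch: list which opcodes this kernel's op table reports as supported. */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
    struct io_uring_probe *probe = io_uring_get_probe();

    if (!probe)
        return 1;

    for (int op = 0; op < IORING_OP_LAST; op++)
        printf("opcode %2d: %s\n", op,
               io_uring_opcode_supported(probe, op) ? "supported" : "not supported");

    io_uring_free_probe(probe);
    return 0;
}
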
xattr.c
25 void io_xattr_cleanup(struct io_kiocb *req) in io_xattr_cleanup()
36 static void io_xattr_finish(struct io_kiocb *req, int ret) in io_xattr_finish()
44 static int __io_getxattr_prep(struct io_kiocb *req, in __io_getxattr_prep()
82 int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fgetxattr_prep()
87 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_getxattr_prep()
108 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fgetxattr()
123 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) in io_getxattr()
148 static int __io_setxattr_prep(struct io_kiocb *req, in __io_setxattr_prep()
180 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_setxattr_prep()
201 int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsetxattr_prep()
[all …]
fs.c
50 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_renameat_prep()
81 int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat()
96 void io_renameat_cleanup(struct io_kiocb *req) in io_renameat_cleanup()
104 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_unlinkat_prep()
130 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat()
147 void io_unlinkat_cleanup(struct io_kiocb *req) in io_unlinkat_cleanup()
154 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_mkdirat_prep()
177 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) in io_mkdirat()
191 void io_mkdirat_cleanup(struct io_kiocb *req) in io_mkdirat_cleanup()
198 int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_symlinkat_prep()
[all …]
rw.c
31 static inline bool io_file_supports_nowait(struct io_kiocb *req) in io_file_supports_nowait()
55 static int io_iov_buffer_select_prep(struct io_kiocb *req) in io_iov_buffer_select_prep()
76 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_rw()
126 void io_readv_writev_cleanup(struct io_kiocb *req) in io_readv_writev_cleanup()
154 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos()
171 static void io_req_task_queue_reissue(struct io_kiocb *req) in io_req_task_queue_reissue()
178 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep()
188 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue()
214 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep()
218 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue()
[all …]
refs.h
14 static inline bool req_ref_inc_not_zero(struct io_kiocb *req) in req_ref_inc_not_zero()
20 static inline bool req_ref_put_and_test(struct io_kiocb *req) in req_ref_put_and_test()
29 static inline void req_ref_get(struct io_kiocb *req) in req_ref_get()
36 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) in __io_req_set_refcount()
44 static inline void io_req_set_refcount(struct io_kiocb *req) in io_req_set_refcount()
notif.h
22 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
23 void io_notif_set_extended(struct io_kiocb *notif);
25 static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif) in io_notif_to_data()
30 static inline void io_notif_flush(struct io_kiocb *notif) in io_notif_flush()
40 static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len) in io_notif_account_mem()
advise.h
3 int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_madvise(struct io_kiocb *req, unsigned int issue_flags);
6 int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags);
splice.h
3 int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4 int io_tee(struct io_kiocb *req, unsigned int issue_flags);
6 int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7 int io_splice(struct io_kiocb *req, unsigned int issue_flags);
uring_cmd.c
16 static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts) in io_uring_cmd_work()
28 struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); in __io_uring_cmd_do_in_task()
43 static inline void io_req_set_cqe32_extra(struct io_kiocb *req, in io_req_set_cqe32_extra()
57 struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); in io_uring_cmd_done()
77 int io_uring_cmd_prep_async(struct io_kiocb *req) in io_uring_cmd_prep_async()
86 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_uring_cmd_prep()
113 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) in io_uring_cmd()
162 struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); in io_uring_cmd_import_fixed()
sync.c
25 int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep()
40 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range()
53 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep()
70 int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync()
85 int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fallocate_prep()
99 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate()
msg_ring.c
58 void io_msg_ring_cleanup(struct io_kiocb *req) in io_msg_ring_cleanup()
76 static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func) in io_msg_exec_remote()
95 struct io_kiocb *req = cmd_to_io_kiocb(msg); in io_msg_tw_complete()
126 static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) in io_msg_ring_data()
160 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) in io_msg_grab_file()
178 static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags) in io_msg_install_complete()
213 struct io_kiocb *req = cmd_to_io_kiocb(msg); in io_msg_tw_fd_complete()
223 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) in io_msg_send_fd()
249 int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_msg_ring_prep()
269 int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) in io_msg_ring()
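
msg_ring.c implements IORING_OP_MSG_RING, which lets one ring post a completion into another ring (io_msg_ring_data()) or pass a file to it (io_msg_send_fd()). A hedged sketch of the data-passing form, assuming a liburing version that provides io_uring_prep_msg_ring() (2.2 or newer) and a second ring whose fd is target_ring_fd; the length and user_data values are arbitrary.

/* Sketch: post a CQE into another ring via IORING_OP_MSG_RING. */
#include <liburing.h>

static void wake_other_ring(struct io_uring *ring, int target_ring_fd)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    struct io_uring_cqe *cqe;

    /* The target ring sees a CQE with res == 0x100 and user_data == 0xcafe. */
    io_uring_prep_msg_ring(sqe, target_ring_fd, 0x100, 0xcafe, 0);
    io_uring_submit(ring);

    io_uring_wait_cqe(ring, &cqe);   /* completion on the sending ring */
    io_uring_cqe_seen(ring, cqe);
}
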
openclose.c
45 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep()
80 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep()
90 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep()
109 int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2()
170 int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat()
175 void io_open_cleanup(struct io_kiocb *req) in io_open_cleanup()
195 static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_close_fixed()
202 int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep()
219 int io_close(struct io_kiocb *req, unsigned int issue_flags) in io_close()
io_uring.c
137 struct io_kiocb *req;
149 static void io_queue_sqe(struct io_kiocb *req);
208 static bool io_match_linked(struct io_kiocb *head) in io_match_linked()
210 struct io_kiocb *req; in io_match_linked()
223 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, in io_match_task_safe()
246 static inline void req_fail_link_node(struct io_kiocb *req, int res) in req_fail_link_node()
252 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_req_add_to_cache()
269 struct io_kiocb *req, *tmp; in io_fallback_req_func()
374 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer()
385 static void io_clean_op(struct io_kiocb *req) in io_clean_op()
[all …]
