#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_for_each_resume(pos, prv) \
	for (; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
} while (0)
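
/*
 * Usage sketch (illustrative only, not upstream documentation): a node
 * is embedded in a containing struct and the list is walked with the
 * pos/prv pair that the deletion helpers below expect. The struct and
 * variable names here are hypothetical.
 *
 *	struct my_item {
 *		struct io_wq_work_node node;
 *	};
 *
 *	struct io_wq_work_list list;
 *	struct io_wq_work_node *pos, *prv;
 *
 *	INIT_WQ_LIST(&list);
 *	wq_list_add_tail(&a.node, &list);	// a, b: struct my_item
 *	wq_list_add_tail(&b.node, &list);
 *	wq_list_for_each(pos, prv, &list) {
 *		struct my_item *item = container_of(pos, struct my_item, node);
 *		// 'prv' trails one node behind, ready for wq_list_del()
 *	}
 */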

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}
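
/*
 * For example (hypothetical names), inserting 'c' between 'a' and 'b'
 * keeps ->last correct even when 'a' was the tail:
 *
 *	wq_list_add_after(&c.node, &a.node, &list);
 */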

/**
 * wq_list_merge - merge the second list into the first one.
 * @list0: the first list
 * @list1: the second list
 * Return the first node after merging.
 */
static inline struct io_wq_work_node *wq_list_merge(struct io_wq_work_list *list0,
						    struct io_wq_work_list *list1)
{
	struct io_wq_work_node *ret;

	if (!list0->first) {
		ret = list1->first;
	} else {
		ret = list0->first;
		list0->last->next = list1->first;
	}
	INIT_WQ_LIST(list0);
	INIT_WQ_LIST(list1);
	return ret;
}
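
/*
 * A minimal sketch (hypothetical list names): both inputs are reset and
 * the combined chain is handed back as a bare NULL-terminated node list:
 *
 *	struct io_wq_work_node *node = wq_list_merge(&list0, &list1);
 *
 *	while (node) {
 *		struct io_wq_work_node *next = node->next;
 *		// process node, e.g. container_of(node, struct io_wq_work, list)
 *		node = next;
 *	}
 */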

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

static inline void wq_list_add_head(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = list->first;
	if (!node->next)
		list->last = node;
	WRITE_ONCE(list->first, node);
}

static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}
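
/*
 * Illustrative use (hypothetical names): detach the prefix of the list
 * up to and including 'last'. With prev == NULL the cut starts at the
 * head; afterwards the detached chain ends at 'last' (its ->next is
 * cleared) and the list begins at the old last->next:
 *
 *	struct io_wq_work_node *head = list.first;
 *
 *	wq_list_cut(&list, last, NULL);
 *	// 'head'..'last' is now a standalone NULL-terminated chain
 */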

static inline void __wq_list_splice(struct io_wq_work_list *list,
				    struct io_wq_work_node *to)
{
	list->last->next = to->next;
	to->next = list->first;
	INIT_WQ_LIST(list);
}

static inline bool wq_list_splice(struct io_wq_work_list *list,
				  struct io_wq_work_node *to)
{
	if (!wq_list_empty(list)) {
		__wq_list_splice(list, to);
		return true;
	}
	return false;
}
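
/*
 * Sketch (hypothetical names): splice a whole pending list in after a
 * given node of another chain; the moved nodes land between 'to' and
 * its old successor. Returns false when there was nothing to move:
 *
 *	if (wq_list_splice(&pending, &anchor_node))
 *		; // pending's nodes now follow anchor_node, pending is empty
 */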

static inline void wq_stack_add_head(struct io_wq_work_node *node,
				     struct io_wq_work_node *stack)
{
	node->next = stack->next;
	stack->next = node;
}

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

static inline
struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
{
	struct io_wq_work_node *node = stack->next;

	stack->next = node->next;
	return node;
}
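
/*
 * The two stack helpers above treat a bare io_wq_work_node as an
 * intrusive LIFO: the "stack" node is a dummy head whose ->next points
 * at the top. A minimal sketch (hypothetical names):
 *
 *	struct io_wq_work_node stack = { .next = NULL };
 *
 *	wq_stack_add_head(&a.node, &stack);
 *	wq_stack_add_head(&b.node, &stack);
 *	node = wq_stack_extract(&stack);	// returns &b.node (LIFO)
 *
 * Note wq_stack_extract() does not check for an empty stack; the caller
 * must ensure stack->next != NULL.
 */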

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	int cancel_seq;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}
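
/*
 * io_wq_work is the unit queued to an io_wq; users embed it in their
 * own request struct and recover the container in the work callback.
 * Sketch with a hypothetical containing struct:
 *
 *	struct my_req {
 *		struct io_wq_work work;
 *	};
 *
 *	static void my_do_work(struct io_wq_work *work)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, work);
 *		// execute the request
 *	}
 */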

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
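
/*
 * Typical lifecycle sketch (hedged; error handling and most field
 * setup elided, 'my_do_work' and 'my_free_work' are hypothetical
 * callbacks):
 *
 *	struct io_wq_data data = {
 *		.hash = hash,
 *		.task = current,
 *		.do_work = my_do_work,
 *		.free_work = my_free_work,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded, &data);
 *
 *	io_wq_enqueue(wq, &req->work);
 *	...
 *	io_wq_put_and_exit(wq);		// tear down when done
 */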

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);
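
/*
 * Cancellation sketch: the callback inspects each queued or running
 * work item and returns true for the ones to cancel. Hypothetical
 * matcher shown; with cancel_all == false the scan stops after the
 * first match:
 *
 *	static bool my_match(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	enum io_wq_cancel ret = io_wq_cancel_cb(wq, my_match, target, false);
 *	// ret: IO_WQ_CANCEL_OK / _RUNNING / _NOTFOUND (see enum above)
 */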

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif