/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

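/*
 * RCU callback freeing an icq.  Frees from the icq_cache recorded by
 * ioc_destroy_icq(), as @icq->q may already be gone by now.
 */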
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq.  Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock.  If it isn't pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which will trigger a lockdep warning.  The ioc's are
	 * guaranteed to be different, so use a different locking subclass
	 * here.  Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
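
/*
 * Illustrative sketch (not a caller in this file): how the get/put pair
 * is meant to be used to pin an io_context across an operation.  Assumes
 * @ioc is already valid, i.e. a reference is held when we take another.
 *
 *	get_io_context(ioc);	// take an extra ref before handing ioc off
 *	...			// use ioc; may block or drop other locks
 *	put_io_context(ioc);	// drop ref; frees ioc if it was the last one
 */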

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;
	struct io_cq *icq;
	struct hlist_node *n;
	unsigned long flags;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need the ioc lock to walk icq_list and the q lock to exit each
	 * icq.  Perform reverse double locking.  Read the comment in
	 * ioc_release_fn() for an explanation of the nested locking
	 * annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

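/*
 * Slow path behind the inline create_io_context() wrapper: allocate and
 * initialize a fresh io_context for @task and try to install it, unless
 * someone else beat us to it or a foreign @task is already exiting.
 * Allocation failure is silently ignored; callers re-check and retry
 * through create_io_context().
 */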
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as the
	 * exit path may issue IOs from e.g. exit_files().  The exit path
	 * is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
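
/*
 * Illustrative sketch (assumption, not a caller in this file): fetching
 * another task's io_context, e.g. to inspect its ioprio.  @p is a
 * hypothetical task pointer the caller already holds a reference on.
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(p, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		...			// e.g. read ioc->ioprio
 *		put_io_context(ioc);	// drop the ref taken above
 *	}
 */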

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using a radix tree and a hint
	 * pointer, both of which are protected with RCU.  All removals are
	 * done holding both the q and ioc locks, and we're holding the q
	 * lock - if we find an icq which points to us, it's guaranteed to
	 * be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
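
/*
 * Illustrative sketch (assumption; not a caller in this file): looking up
 * %current's icq for @q and falling back to creation.  ioc_create_icq()
 * takes q->queue_lock itself, so creation must happen after the caller
 * has dropped that lock.
 *
 *	struct io_cq *icq = NULL;
 *
 *	spin_lock_irq(q->queue_lock);
 *	if (current->io_context)
 *		icq = ioc_lookup_icq(current->io_context, q);
 *	spin_unlock_irq(q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(q, GFP_KERNEL);
 */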

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking %current->io_context and @q exists.  If the
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and that @q
 * is alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

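	/*
	 * Preload the radix tree so that the insertion below, which runs
	 * under queue_lock and ioc->lock, can't fail on memory allocation.
	 */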
	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

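/* Set @flags on every icq of @ioc.  Caller must hold @ioc->lock. */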
void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		icq->flags |= flags;
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  The iosched is responsible for checking the bit and applying it
 * on the request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.  The
 * iosched is responsible for checking the bit and applying it on the
 * request issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

/**
 * icq_get_changed - fetch and clear icq changed mask
 * @icq: icq of interest
 *
 * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
 * @icq->ioc->lock.
 */
unsigned int icq_get_changed(struct io_cq *icq)
{
	unsigned int changed = 0;
	unsigned long flags;

	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
		spin_lock_irqsave(&icq->ioc->lock, flags);
		changed = icq->flags & ICQ_CHANGED_MASK;
		icq->flags &= ~ICQ_CHANGED_MASK;
		spin_unlock_irqrestore(&icq->ioc->lock, flags);
	}
	return changed;
}
EXPORT_SYMBOL(icq_get_changed);
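
/*
 * Illustrative sketch (assumption; modeled on how an iosched could consume
 * the changed bits on its request issue path).  The reactions shown are
 * hypothetical; only the icq_get_changed() call and the flag names come
 * from this file.
 *
 *	unsigned int changed = icq_get_changed(icq);
 *
 *	if (changed & ICQ_IOPRIO_CHANGED)
 *		...	// re-read icq->ioc->ioprio and reposition the queue
 *	if (changed & ICQ_CGROUP_CHANGED)
 *		...	// drop the cached cgroup association and re-link
 */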

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);