/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

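/*
 * A prison holds the set of cells that are currently locked.  Cells are
 * kept in an rb-tree keyed by dm_cell_key; the mempool guarantees that
 * cell allocation can always make forward progress under memory pressure.
 */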
struct dm_bio_prison {
	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The mempool reserves MIN_CELLS cells; this is the number of cells you
 * want in use _concurrently_, not the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
	struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static void __setup_new_cell(struct dm_cell_key *key,
			     struct bio *holder,
			     struct dm_bio_prison_cell *cell)
{
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
}

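/*
 * Keys are ordered by the virtual flag, then by device.  Keys on the same
 * device compare equal whenever their [block_begin, block_end) ranges
 * overlap, so any overlapping range resolves to the same cell.
 */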
static int cmp_keys(struct dm_cell_key *lhs,
		    struct dm_cell_key *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

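/*
 * Walk the rb-tree looking for a cell whose key overlaps @key.  If one is
 * found, @inmate (when non-NULL) is queued on that cell and 1 is returned.
 * Otherwise @cell_prealloc is set up with @inmate as the holder, inserted
 * into the tree, and 0 is returned.
 */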
static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell *cell =
			rb_entry(*new, struct dm_bio_prison_cell, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			if (inmate)
				bio_list_add(&cell->bios, inmate);
			*cell_result = cell;
			return 1;
		}
	}

	__setup_new_cell(key, inmate, cell_prealloc);
	*cell_result = cell_prealloc;

	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}

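/*
 * A typical caller pattern looks something like the sketch below (the
 * names are illustrative, not taken from any particular target):
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell)) {
 *		// Cell already held: bio is now queued on it, and the
 *		// preallocated cell wasn't consumed, so give it back.
 *		dm_bio_prison_free_cell(prison, prealloc);
 *	}
 *	// Otherwise prealloc became the cell and bio is its holder.
 */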
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison *prison,
			   struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	spin_lock_irq(&prison->lock);
	__cell_release(prison, cell, bios);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);
	bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(prison, cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_status = error;
		bio_endio(bio);
	}
}
EXPORT_SYMBOL_GPL(dm_cell_error);

void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context,
			   struct dm_bio_prison_cell *cell)
{
	spin_lock_irq(&prison->lock);
	visit_fn(context, cell);
	rb_erase(&cell->node, &prison->cells);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);

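/*
 * If the cell has no waiting bios it is removed from the prison and 1 is
 * returned.  Otherwise the first waiting bio is promoted to be the new
 * holder and 0 is returned, leaving the cell in place.
 */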
static int __promote_or_release(struct dm_bio_prison *prison,
				struct dm_bio_prison_cell *cell)
{
	if (bio_list_empty(&cell->bios)) {
		rb_erase(&cell->node, &prison->cells);
		return 1;
	}

	cell->holder = bio_list_pop(&cell->bios);
	return 0;
}

int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote_or_release(prison, cell);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);

/*----------------------------------------------------------------*/

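/*
 * A deferred set is a ring of DEFERRED_SET_SIZE reference-counted entries.
 * dm_deferred_entry_inc() takes a reference on the current entry, and
 * dm_deferred_entry_dec() drops it, sweeping any fully drained entries and
 * splicing their work items onto the caller's list.  Work queued with
 * dm_deferred_set_add_work() is attached to the current entry, so it only
 * becomes runnable once everything that took a reference before it was
 * queued has finished.
 */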
#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

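/*
 * Take a reference on the current entry.  The caller must pair this with
 * dm_deferred_entry_dec() on the returned entry.
 */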
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

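/*
 * Advance the sweeper over entries whose reference count has dropped to
 * zero, splicing their work items onto @head.  The current entry is only
 * swept once it too has fully drained.
 */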
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred because there are still entries in
 * flight, or 0 if nothing is pending and the caller may perform the job
 * immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned next_entry;

	spin_lock_irq(&ds->lock);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irq(&ds->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init_v1(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void dm_bio_prison_exit_v1(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

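/*
 * Both bio prison implementations (v1 and v2) are built into this module,
 * so their init and exit routines are driven from these tables.
 */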
static int (*_inits[])(void) __initdata = {
	dm_bio_prison_init_v1,
	dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
	dm_bio_prison_exit_v1,
	dm_bio_prison_exit_v2,
};

static int __init dm_bio_prison_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_bio_prison_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");