/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
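
/*
 * A worked example of the policy above (illustrative values, not from the
 * original code): on a machine with 4 GiB of directly-mapped memory,
 * DM_BUFIO_MEMORY_PERCENT gives a default cache of roughly 4 GiB * 2% =
 * ~80 MiB.  On a 32-bit kernel whose vmalloc area is only 128 MiB,
 * DM_BUFIO_VMALLOC_PERCENT would cap it at 128 MiB * 25% = 32 MiB, and
 * the lower of the two values wins.  Background writeback then starts
 * once 75% of a client's buffer limit is dirty.
 */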

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Buffer hash
 */
#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
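
/*
 * A worked example of the hash (illustrative): for block 0x12345678,
 * (0x12345678 >> 20) ^ 0x12345678 = 0x123 ^ 0x12345678 = 0x1234575b,
 * and 0x1234575b & 0xfffff = 0x4575b, so the buffer hangs off
 * cache_hash[0x4575b].  Folding the high bits in keeps blocks that differ
 * only above bit 20 from all landing in the same bucket.
 */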

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	struct hlist_head *cache_hash;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct hlist_node hash_list;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};
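
/*
 * Layout note (summarizing behaviour found elsewhere in this file): each
 * dm_buffer is allocated with c->aux_size extra bytes appended directly
 * after the structure (see alloc_buffer), and that area is what
 * dm_bufio_get_aux_data returns as "b + 1".  Clients use it for their own
 * per-buffer bookkeeping.
 */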

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()                do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed the cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = dm_bufio_cache_size;

	barrier();

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here; it just causes flushes of some other
 * buffers, so more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY) {
		noio_flag = current->flags & PF_MEMALLOC;
		current->flags |= PF_MEMALLOC;
	}

	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		current->flags = (current->flags & ~PF_MEMALLOC) | noio_flag;

	return ptr;
}
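
/*
 * To make the decision tree above concrete (illustrative, assuming
 * PAGE_SIZE == 4K): a 512-byte block is at most PAGE_SIZE / 2 and comes
 * from the per-block-size kmem_cache; a 64K block requested with
 * __GFP_NORETRY (the ordinary cache-grow path) uses
 * __get_free_pages(order 4); the same 64K block requested without
 * __GFP_NORETRY (the must-not-fail reserve path) falls through to
 * __vmalloc.
 */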

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	hlist_del(&b->hash_list);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_del(&b->lru_list);
	list_add(&b->lru_list, &c->lru[dirty]);
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
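
/*
 * Concretely (illustrative, assuming PAGE_SIZE == 4K): a 4K buffer from
 * the kmem_cache or __get_free_pages fits in one bio_vec and goes through
 * use_inline_bio; a 64K buffer still fits in DM_BUFIO_INLINE_VECS pages
 * and also uses the inline bio; a 256K buffer, or any buffer that was
 * vmalloced, is routed through use_dmio instead.
 */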

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE, ptr is page-aligned.
	 * If len < PAGE_SIZE, the buffer doesn't cross a page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
	io_schedule();

	return 0;
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock(&b->state, B_WRITING,
			 do_io_schedule, TASK_UNINTERRUPTIBLE);

	submit_io(b, WRITE, b->block, write_endio);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b);
	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other thread frees some buffer or releases its hold
 * count on some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};
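
/*
 * Summary of what the flags mean for __bufio_new (distilled from the code
 * below, not part of the original comments):
 *	NF_FRESH:    the caller will overwrite the whole block, so the
 *		     buffer is returned without reading it from disk.
 *	NF_READ:     the block is read from disk if it isn't cached yet.
 *	NF_GET:      only return the buffer if it is already cached and
 *		     fully read; never allocate and never wait for I/O.
 *	NF_PREFETCH: start an asynchronous read if the block isn't cached,
 *		     but give up silently rather than wait for memory.
 */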

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b);
		dm_bufio_cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < DM_BUFIO_MIN_BUFFERS)
		buffers = DM_BUFIO_MIN_BUFFERS;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
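
/*
 * Worked example (illustrative): with a 64 MiB per-client allowance and
 * 4K blocks, sectors_per_block_bits is 3, so buffers = 64 MiB >> (3 + 9)
 * = 16384.  *limit_buffers is then 16384 and *threshold_buffers is
 * 16384 * 75 / 100 = 12288, i.e. background writeback kicks in once
 * 12288 buffers of that client are dirty.
 */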

/*
 * Check if we're over the watermark.
 * If we're over limit_buffers, free unclaimed buffers until we get under
 * the limit (or until no more buffers can be freed).
 * If we're over threshold_buffers, start writing dirty buffers back
 * asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1);
}

/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	struct hlist_node *hn;

	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
			     hash_list) {
		dm_bufio_cond_resched();
		if (b->block == block)
			return b;
	}

	return NULL;
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if the dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_READING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit);
	dm_bufio_unlock(c);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
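
/*
 * A minimal usage sketch for the read side of this interface (illustrative
 * only; "my_block" and the surrounding error handling are made up for the
 * example):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, my_block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	...read or modify the block_size bytes at data...
 *	dm_bufio_mark_buffer_dirty(bp);	(only if it was modified)
 *	dm_bufio_release(bp);
 *	return dm_bufio_write_dirty_buffers(c);	(commit to disk)
 */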

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}

	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us.  In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier and flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = REQ_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b);
	if (b->hold_count == 1) {
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock(&b->state, B_WRITING,
				 do_io_schedule, TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * Test if the buffer is unused and too old, and if so clean and free it.
 * If noio is set, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O got rerouted
 * to a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 1;

	if (!(gfp & __GFP_IO)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 1;
	}

	if (b->hold_count)
		return 1;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 0;
}

static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   struct shrink_control *sc)
{
	int l;
	struct dm_buffer *b, *tmp;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
			    !--nr_to_scan)
				return;
		dm_bufio_cond_resched();
	}
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct dm_bufio_client *c =
	    container_of(shrinker, struct dm_bufio_client, shrinker);
	unsigned long r;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (sc->gfp_mask & __GFP_IO)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return !nr_to_scan ? 0 : -1;

	if (nr_to_scan)
		__scan(c, nr_to_scan, sc);

	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	if (r > INT_MAX)
		r = INT_MAX;

	dm_bufio_unlock(c);

	return r;
}
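
/*
 * Note on the return convention above (the shrinker API of this kernel
 * generation): when sc->nr_to_scan is 0 the callback only reports how many
 * objects are freeable, so we return the buffer count clamped to INT_MAX;
 * returning -1 when the trylock fails tells the VM to skip this shrinker
 * for now instead of stalling on the mutex.
 */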

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
	if (!c->cache_hash) {
		r = -ENOMEM;
		goto bad_hash;
	}

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		INIT_HLIST_HEAD(&c->cache_hash[i]);

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.shrink = shrink;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	vfree(c->cache_hash);
bad_hash:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
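
/*
 * A client lifecycle sketch (illustrative; the 4096/4/16 values are made
 * up for the example):
 *
 *	c = dm_bufio_client_create(bdev, 4096, 4, 16, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...dm_bufio_read / dm_bufio_new / dm_bufio_write_dirty_buffers...
 *	dm_bufio_client_destroy(c);
 *
 * Here 4096 is the block size, 4 is the number of reserved buffers that
 * remain available even under memory pressure, and 16 is aux_size, the
 * per-buffer scratch area returned by dm_bufio_get_aux_data.
 */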

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		BUG_ON(!hlist_empty(&c->cache_hash[i]));

	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	vfree(c->cache_hash);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static void cleanup_old_buffers(void)
{
	unsigned long max_age = dm_bufio_max_age;
	struct dm_bufio_client *c;

	barrier();

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
			__func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
			__func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");