/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/list_sort.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

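/*
 * Map buffer flags to page allocation flags: readahead allocations use
 * __GFP_NORETRY so they fail fast under memory pressure instead of
 * triggering heavy reclaim, XBF_DONT_BLOCK maps to GFP_NOFS to avoid
 * recursing into the filesystem from within an allocation, and
 * __GFP_NOWARN suppresses failure warnings because the callers retry or
 * fall back to a different allocation strategy.
 */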
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp))

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 * there to optimise the shrinker removing the buffer from the LRU and
 * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	atomic_set(&bp->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

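/*
 * Initialise a newly allocated buffer: reference counts, list heads, wait
 * queues, and the target/offset/length that identify it. Note that b_sema
 * is initialised to zero, so the buffer is returned to the caller already
 * locked, with no waiters.
 */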
STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);

	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 * 	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

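		/*
		 * vm_map_ram() can fail transiently when the vmap area is
		 * fragmented by lazily-freed mappings; vm_unmap_aliases()
		 * flushes those out, so a single retry is worthwhile before
		 * giving up and returning ENOMEM.
		 */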
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Looks up, and creates if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (xfs_buf_cond_lock(bp)) {
		/* failed, so wait for the lock if requested. */
		if (!(flags & XBF_TRYLOCK)) {
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 *	Assembles a buffer covering the specified range.
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	status = xfs_buf_iorequest(bp);
	if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	xfs_buf_lock(bp);
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);
	XFS_BUF_BUSY(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

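/*
 * Translate a kernel virtual address to its backing page, handling both
 * directly-mapped and vmalloc'd memory.
 */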
static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	return vmalloc_to_page(addr);
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

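/*
 * Allocate an uncached buffer - one that is never inserted into the per-AG
 * rbtree and so is invisible to cache lookups. These are typically used
 * for one-off I/O against known disk locations; see xfs_buf_read_uncached()
 * above for the read-side wrapper.
 */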
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			len,
	int			flags)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	xfs_buf_unlock(bp);

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_deallocate(bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 *	Releases a hold on the specified buffer.  If the
 *	hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

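	/*
	 * Drop the hold count; if it reaches zero, decide the buffer's fate
	 * under the pag_buf_lock: either park it on the LRU for reuse, or
	 * remove it from the rbtree and free it.
	 */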
	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}

/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_cond_lock(bp, _RET_IP_);
	return locked ? 0 : -EBUSY;
}

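/*
 * Report the current count of the buffer's lock semaphore. This is a
 * debugging/assertion aid rather than something to base locking
 * decisions on.
 */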
int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return bp->b_sema.count;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

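/*
 * I/O completion processing: clear the in-flight flags, mark the buffer
 * done if no error was recorded, then either run the iodone callback
 * (inline or deferred to a workqueue) or wake anyone sleeping in
 * xfs_buf_iowait().
 */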
void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

int
xfs_bwrite(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	xfs_buf_relse(bp);
	return error;
}

void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	trace_xfs_buf_bdwrite(bp, _RET_IP_);

	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	XFS_BUF_ERROR(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = XFS_BUF_BFLAGS(bp);
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		XFS_BUF_ERROR(bp, EIO);
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

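/*
 * Drop one reference to the outstanding I/O count; the final reference
 * triggers completion processing.
 */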
STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_FLUSH_FUA;
	} else if (bp->b_flags & XBF_LOG_BUFFER) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

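	/*
	 * A buffer can be larger than a single bio can cover, so cap each
	 * bio at BIO_MAX_SECTORS worth of pages and loop back here until
	 * the whole range has been submitted.
	 */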
next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially; this will stop an I/O completion
	 * callout which happens before we have started all the I/O from
	 * calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

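/*
 * Shrinker callback for the buffer cache LRU. A zero nr_to_scan is just a
 * query for the current LRU population. Otherwise, candidate buffers are
 * moved to a private dispose list under bt_lru_lock and only released once
 * the lock has been dropped, so xfs_buf_rele() never recurses on it.
 */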
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	int			nr_to_scan,
	gfp_t			mask)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	return 0;
}

/*
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so we don't know what size sectors
 *	are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spin_lock_init(&btp->bt_delwrite_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 *	Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
	int			unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	if (list_empty(dwq)) {
		/* start xfsbufd as it is about to have something to do */
		wake_up_process(bp->b_target->bt_task);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
	int			dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwrite_lock);
	list_move(&bp->b_list, &btp->bt_delwrite_queue);
	spin_unlock(&btp->bt_delwrite_lock);
}

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

void
xfs_buf_delwri_sort(
	xfs_buftarg_t	*target,
	struct list_head *list)
{
	list_sort(NULL, list, xfs_buf_cmp);
}

STATIC int
xfsbufd(
	void		*data)
{
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		struct list_head tmp;
		struct blk_plug plug;

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwrite_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);

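		/*
		 * Issue the sorted delayed writes inside a block plug so the
		 * requests can be merged by the block layer before dispatch.
		 */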
		blk_start_plug(&plug);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
		}
		blk_finish_plug(&plug);
	} while (!kthread_should_stop());

	return 0;
}

/*
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	xfs_buf_t	*bp;
	int		pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
	struct blk_plug plug;

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	if (wait) {
		/* Wait for IO to complete. */
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
						WQ_MEM_RECLAIM, 1);
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif