/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);

static struct workqueue_struct *xfslogd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

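/*
 * Return the length of the buffer's virtually contiguous mapping, i.e. the
 * full span of its pages minus the offset of the data into the first page.
 */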
static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 * there to optimise the shrinker removing the buffer from the LRU and
 * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	xfs_buf_delwri_dequeue(bp);
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

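/*
 * Allocate and minimally initialise a buffer structure for the given range of
 * the target device. No memory is allocated for the data the buffer covers;
 * that is done separately by xfs_buf_allocate_memory().
 */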
struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;

	bp = kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags));
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 * 	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Look up, and create if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}


/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata-intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = xfs_buf_alloc(target, ioff << BBSHIFT, isize << BBSHIFT,
			       flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (!bp) {
		kmem_zone_free(xfs_buf_zone, new_bp);
		return NULL;
	}

	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else
		kmem_zone_free(xfs_buf_zone, new_bp);

	/*
	 * Now we have a workable buffer, fill in the block number so
	 * that we can do IO on it.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

found:
	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

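/*
 * Submit the read I/O for a buffer that has already been found and locked.
 * Synchronous reads wait for completion; XBF_ASYNC reads return as soon as
 * the request has been issued.
 */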
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	status = xfs_buf_iorequest(bp);
	if (status || bp->b_error || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

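/*
 * Look up the buffer covering the given range and, if its contents are not
 * already valid (XBF_DONE), read it in from disk. The buffer is returned
 * locked and held; callers typically release it with xfs_buf_relse() when
 * done, e.g. (illustrative only):
 *
 *	bp = xfs_buf_read(target, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		... examine bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 */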
xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			len)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_file_offset = 0;
	bp->b_buffer_length = bp->b_count_desired = len;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
}

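/*
 * Return the struct page backing a kernel virtual address, whether it comes
 * from the direct mapping or from vmalloc space.
 */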
static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

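/*
 * Associate externally allocated memory with a buffer, building a page list
 * that covers it. The memory is not owned by the buffer and is not freed
 * when the buffer is released.
 */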
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

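/*
 * Allocate a buffer of the given length that is backed by freshly allocated
 * pages and mapped into kernel address space. It is not inserted into the
 * cache rb-tree, so the final xfs_buf_rele() frees it directly.
 */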
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			len,
	int			flags)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_alloc(target, 0, len, 0);
	if (unlikely(bp == NULL))
		goto fail;

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 *	Releases a hold on the specified buffer.  If the
 *	hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

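/*
 * Mark I/O complete on a buffer: clear the I/O state flags, set XBF_DONE on
 * success, and then either run the iodone callback (directly or, if schedule
 * is set, from the xfslogd workqueue) or wake anyone waiting in
 * xfs_buf_iowait().
 */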
void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d buf count %zd",
		(__uint64_t)XFS_BUF_ADDR(bp), func,
		bp->b_error, XFS_BUF_COUNT(bp));
}

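/*
 * Write a buffer synchronously, waiting for I/O completion. If the write
 * fails, the filesystem is shut down with SHUTDOWN_META_IO_ERROR and the
 * error is returned to the caller.
 */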
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (!bp->b_error)
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

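/*
 * Map the buffer's pages into one or more bios and submit them, choosing the
 * request flags (read, readahead, or write with optional FUA/flush) from the
 * buffer flags. Each chunk submitted bumps b_io_remaining, which is dropped
 * again as the bios complete.
 */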
STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;


	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_iorequest) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

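/*
 * Start I/O on a buffer. For writes this first waits for the buffer to be
 * unpinned. An extra hold and an initial b_io_remaining count of 1 ensure the
 * buffer cannot be freed or completed until all the chunked bios have been
 * submitted.
 */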
int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & XBF_DELWRI));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	/* Set the count to 1 initially; this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

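/*
 * Return the address of the data at the given byte offset into the buffer,
 * using the kernel mapping if present or indexing into the page array
 * otherwise.
 */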
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

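/*
 * Shrinker callback for the buffer cache. Buffers whose b_lru_ref count has
 * reached zero are moved to a private dispose list and released; anything
 * still holding a reference gets rotated to the tail of the LRU for another
 * pass.
 */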
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, name);
		return EINVAL;
	}

	return 0;
}

/*
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so we don't know what size sectors
 *	are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_alloc_delwri_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwri_queue);
	spin_lock_init(&btp->bt_delwri_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwri_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}


/*
 *	Delayed write buffer handling
 */
void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp)
{
	struct xfs_buftarg	*btp = bp->b_target;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & XBF_READ));

	spin_lock(&btp->bt_delwri_lock);
	if (!list_empty(&bp->b_list)) {
		/* if already in the queue, move it to the tail */
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_move_tail(&bp->b_list, &btp->bt_delwri_queue);
	} else {
		/* start xfsbufd as it is about to have something to do */
		if (list_empty(&btp->bt_delwri_queue))
			wake_up_process(bp->b_target->bt_task);

		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_DELWRI | _XBF_DELWRI_Q | XBF_ASYNC;
		list_add_tail(&bp->b_list, &btp->bt_delwri_queue);
	}
	bp->b_queuetime = jiffies;
	spin_unlock(&btp->bt_delwri_lock);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	int			dequeued = 0;

	spin_lock(&bp->b_target->bt_delwri_lock);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(&bp->b_target->bt_delwri_lock);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwri_lock);
	list_move(&bp->b_list, &btp->bt_delwri_queue);
	spin_unlock(&btp->bt_delwri_lock);
}

/*
 * Move as many buffers as specified to the supplied list
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(&target->bt_delwri_lock);
	list_for_each_entry_safe(bp, n, &target->bt_delwri_queue, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}

	spin_unlock(&target->bt_delwri_lock);
	return skipped;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64-bit values.
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

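/*
 * The delayed write daemon. One of these runs per buffer target; it sleeps
 * until the delwri queue is non-empty, then periodically splits off buffers
 * that have aged past xfs_buf_age_centisecs, sorts them by block number and
 * issues the writes under a block plug.
 */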
STATIC int
xfsbufd(
	void		*data)
{
	xfs_buftarg_t   *target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		struct list_head tmp;
		struct blk_plug plug;

		if (unlikely(freezing(current)))
			try_to_freeze();

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwri_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);

		blk_start_plug(&plug);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
		}
		blk_finish_plug(&plug);
	} while (!kthread_should_stop());

	return 0;
}

/*
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	xfs_buf_t	*bp;
	int		pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
	struct blk_plug plug;

	flush_workqueue(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	if (wait) {
		/* Wait for IO to complete. */
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	return 0;

 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}