/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;
	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space *mapping = inode->i_mapping;
	int			error = 0;

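	/*
	 * With no dirty pages in the mapping there is no data to write back,
	 * so skip the inode without taking the iolock.
	 */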
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			return 0;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

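	/*
	 * Write back the dirty pages; unless SYNC_WAIT is set the flush is
	 * issued async (XBF_ASYNC) and we do not wait for I/O completion.
	 */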
	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

	/*
	 * We don't want to try again on non-blocking flushes that can't run
	 * again immediately. If an inode really must be written, then that's
	 * what the SYNC_WAIT flag is for.
	 */
	if (error == EAGAIN) {
		ASSERT(!(flags & SYNC_WAIT));
		error = 0;
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	int			error;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* force out the log */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		xfs_flush_buftarg(mp->m_rtdev_targp, 1);

	return error ? error : error2;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most metadata but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

static void
xfs_syncd_queue_sync(
	struct xfs_mount        *mp)
{
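	/*
	 * xfs_syncd_centisecs is in hundredths of a second, so multiply by
	 * 10 to get the sync period in milliseconds for msecs_to_jiffies().
	 */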
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs syncd work default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_syncd_queue_reclaim(
	struct xfs_mount        *mp)
{

	/*
	 * We can have inodes enter reclaim after we've shut down the syncd
	 * workqueue during unmount, so don't allow reclaim work to be queued
	 * during unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
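		/*
		 * One sixth of the sync period: with the 30s default noted
		 * above this gives the 5s reclaim cadence
		 * (3000 centisecs / 6 * 10 = 5000ms).
		 */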
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
STATIC void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room.
 *
 * Queue a new data flush if there isn't one already in progress and
 * wait for completion of the flush. This means that we only ever have one
 * inode flush in progress no matter how many ENOSPC events are occurring and
 * so will prevent the system from bogging down due to every concurrent
 * ENOSPC event scanning all the active inodes in the system for writeback.
 */
void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	queue_work(xfs_syncd_wq, &mp->m_flush_work);
	flush_work_sync(&mp->m_flush_work);
}

STATIC void
xfs_flush_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work,
					struct xfs_mount, m_flush_work);

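	/*
	 * Two passes: the first starts as much writeback as possible without
	 * blocking, the second waits so the data is on disk before the
	 * ENOSPC caller waiting in xfs_flush_inodes() is released.
	 */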
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);
	xfs_syncd_queue_reclaim(mp);

	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;

		/*
		 * If we only have a single dirty inode in a cluster there is
		 * a fair chance that the AIL push may have pushed it into
		 * the buffer, but xfsbufd won't touch it until 30 seconds
		 * from now, and thus we will lock up here.
		 *
		 * Promote the inode buffer to the front of the delwri list
		 * and wake up xfsbufd now.
		 */
		xfs_promote_inode(ip);
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
	 * reclaim as we can deadlock with inode cluster removal.
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here. As a result,
	 * doing a blocking xfs_itobp() to get the cluster buffer will result
	 * in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
	 * just unlock the inode, back off and try again. Hopefully the next
	 * pass through will see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
	if (sync_mode & SYNC_WAIT) {
		if (error == EAGAIN) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			/* backoff longer than in xfs_ifree_cluster */
			delay(2);
			goto restart;
		}
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_warn(ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode lifetime early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);

	return error;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

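		/*
		 * In non-blocking mode, skip AGs another reclaimer is already
		 * working on and resume the scan from this AG's saved cursor;
		 * in blocking mode, wait for the lock instead.
		 */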
		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr_found == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

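			/*
			 * Charge a full batch against the scan budget even if
			 * fewer inodes were found; the loop condition below
			 * ends the walk once the budget is used up.
			 */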
			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
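	/* INT_MAX: effectively unlimited scan budget, so all AGs are walked */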
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_syncd_queue_reclaim(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}