/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_trace.h"


/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}


	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	spin_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
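
/*
 * Illustrative usage sketch for xfs_iget() above, added for clarity.  It is
 * not called from this file, and the caller-side conventions shown here
 * (positive errnos, IRELE() to drop the reference) are assumptions about
 * typical XFS callers rather than something defined in this file:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... inspect the inode with the ILOCK held shared ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */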

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
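
/*
 * Usage sketch for the pair above (illustrative only, not part of this
 * file's logic): whatever mode xfs_ilock_map_shared() chose must be handed
 * back to xfs_iunlock_map_shared() unchanged.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */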

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
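
/*
 * Illustrative sketch of the flag combinations listed above (not used in
 * this file): take both locks in one call, IO lock before inode lock, and
 * release them with the same flags.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */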

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
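
/*
 * Illustrative sketch (assumed caller convention, not taken from this
 * file): a caller that must not block tries the lock and backs off with
 * EAGAIN on failure.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return EAGAIN;
 *	... do the non-blocking work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */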

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
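
/*
 * Sketch of the demotion pattern (illustrative only): set up under the
 * exclusive lock, downgrade, then finish and unlock in shared mode.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive setup ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... continue with the lock held shared ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */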

#ifdef DEBUG
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
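
/*
 * Typical debug-only usage sketch: callers assert the expected lock state
 * on entry (illustrative, not part of this file):
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */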