/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* assert_spin_locked(&gl->gl_spin); */

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
		return 0;
	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
	    !list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl)) {
		spin_lock(&lru_lock);

		if (!list_empty(&gl->gl_lru))
			list_del_init(&gl->gl_lru);
		else
			atomic_inc(&lru_count);

		list_add_tail(&gl->gl_lru, &lru_list);
		spin_unlock(&lru_lock);
	}
}

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	__gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket index to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
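
/*
 * Summarising the tests above (a restatement, not additional policy):
 * a request is granted iff
 *  - it asks for the state the glock already has, except that an EX
 *    request, or any request queued behind an EX request, is granted
 *    only at the head of the queue; or
 *  - the glock is held EX and this request and the queue head both ask
 *    for SH, or both ask for DF; or
 *  - LM_FLAG_ANY is set and the glock is in any state but UN.
 * GL_EXACT disables every path except the exact state match.
 */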

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM (0 simply fails queued "try" locks)
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The glock
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
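
/*
 * Typical usage, as a sketch (the surrounding variables are the
 * caller's own; the inode glops are just one example):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);	(drops the reference taken by _get)
 */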

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, the demote is deferred (GLF_PENDING_DEMOTE)
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/*
		 * Bound the formatting to the scratch buffer and emit it
		 * as plain text, so the result cannot be reinterpreted as
		 * a format string.
		 */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
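
/*
 * The usual synchronous pattern built from the functions above (a
 * sketch; many callers go through gfs2_glock_nq_init() from glock.h,
 * which combines the first two calls):
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... the lock is held here ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */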

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
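
/*
 * With GL_ASYNC the enqueue returns immediately and the caller polls
 * (an illustrative sketch):
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);	(never returns an error with GL_ASYNC)
 *	... do other work ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */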

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array for the sorted holder pointers
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
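
/*
 * Sketch of a multiple acquisition (illustrative; gl1 and gl2 are any
 * two glocks the caller holds references on). nq_m_sync() sorts the
 * holders by lock number, so the caller need not order them:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */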

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation, or
 * (c) there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

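/*
 * Glock LRU shrinker. Under this generation of the shrinker API, the
 * callback is asked to scan "nr" entries (nr == 0 means "just report")
 * and returns an estimate of the remaining freeable entries: here,
 * lru_count scaled by sysctl_vfs_cache_pressure (a percentage). The
 * -1 return tells the VM no progress is possible without __GFP_FS.
 */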
static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket number
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, &gl->gl_flags),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
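
/*
 * For example (all values invented for illustration), an inode glock
 * with one granted shared holder would come out as:
 *
 *	G:  s:SH n:2/4b2c f:q t:SH d:EX/0 a:0 r:3
 *	 H: s:SH f:H e:0 p:1234 [cat] gfs2_inode_lookup+0x5e/0x200
 *
 * per the format strings in __dump_glock() and dump_holder() above.
 */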

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}