/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	INIT_LIST_HEAD(&tr.tr_list_buf);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	BUG_ON(current->journal_info);
	current->journal_info = &tr;

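	/*
	 * Walk the glock's AIL list, detaching each buffer and turning it
	 * into a revoke.  sd_ail_lock is dropped across the revoke, which
	 * is added under the log lock, and retaken for the next iteration.
	 */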
	spin_lock(&sdp->sd_ail_lock);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		gfs2_remove_from_ail(bd);
		spin_unlock(&sdp->sd_ail_lock);

		bd->bd_bh = NULL;
		bh->b_private = NULL;
		bd->bd_blkno = bh->b_blocknr;
		gfs2_log_lock(sdp);
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		gfs2_trans_add_revoke(sdp, bd);
		gfs2_log_unlock(sdp);

		spin_lock(&sdp->sd_ail_lock);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);

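	/* End the transaction and flush the log so the revokes hit the journal */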
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

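	/*
	 * Flush the journal first, then write back and wait on the rgrp's
	 * own metadata pages, and finally drop the now-written buffers
	 * from the AIL.
	 */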
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags saying what to invalidate
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

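	/*
	 * Throw away all cached pages of rgrp metadata; clearing
	 * GFS2_RDF_UPTODATE below forces the rgrp header and bitmaps
	 * to be re-read the next time the rgrp is used.
	 */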
	BUG_ON(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
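	/*
	 * If the inode has been mmap()ed for writing, zap any shared
	 * mappings now so that subsequent writes fault again and go back
	 * through gfs2_page_mkwrite().
	 */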
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

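	/*
	 * Write ordering: flush the journal first, then start writeback
	 * on both the data and metadata mappings, and wait for each to
	 * complete before emptying the AIL.
	 */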
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags saying what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

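	/*
	 * DIO_METADATA means the cached metadata must go too: drop the
	 * pages backing it, mark the inode invalid so that it will be
	 * re-read from disk on the next inode_go_lock(), and forget any
	 * cached ACLs.
	 */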
	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
		gl->gl_sbd->sd_rindex_uptodate = 0;
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

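	/*
	 * Never demote the glocks protecting the journal index or the
	 * resource group index, and refuse while more than one holder
	 * is queued on the glock.
	 */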
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

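	/* Re-read the inode from disk if it was invalidated while unlocked */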
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

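	/*
	 * If a truncate was interrupted (e.g. by a crash on another
	 * node), queue the inode for gfs2_quotad to finish the job and
	 * wake the daemon up.
	 */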
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(&ip->i_inode));
	return 0;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the glock holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

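	/*
	 * If the journal is still live, sync all dirty metadata and shut
	 * the log down cleanly before the transaction glock is released.
	 */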
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

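	/*
	 * After regaining the transaction glock, invalidate the cached
	 * journal metadata and verify from the log head that the journal
	 * was shut down cleanly before picking up where it left off.
	 */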
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize the head of the log  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return;

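	/*
	 * A remote demote request to LM_ST_UNLOCKED on a held iopen glock
	 * means the inode has been unlinked elsewhere: queue the delete
	 * work, taking an extra glock reference which the work item (or
	 * a failed queue attempt) releases.
	 */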
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put_nolock(gl);
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_min_hold_time = HZ / 5,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_min_hold_time = HZ / 5,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

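/* Lookup table from lock type number to glock operations vector */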
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};