// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;
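
/**
 * gfs2_ail_error - report an AIL buffer in an unexpected state
 * @gl: the glock the buffer belongs to
 * @bh: the buffer that is still dirty, pinned or locked
 *
 * Logs the buffer and glock details and schedules a delayed withdraw,
 * since a buffer in this state on the AIL indicates a logging problem.
 */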
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
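
/**
 * gfs2_ail_empty_gl - revoke all AIL buffers for a given glock
 * @gl: the glock
 *
 * Writes revokes for every buffer on the glock's AIL list and flushes
 * the log so that they reach the journal.  When the AIL is empty, any
 * revokes already queued (or in flight) at the sdp level are still
 * flushed or waited for.
 *
 * Returns: errno
 */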
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}
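
/**
 * gfs2_ail_flush - write revokes for a glock's AIL buffers
 * @gl: the glock
 * @fsync: true when called from the fsync path; buffers that are still
 *         dirty, pinned or locked are then skipped instead of being
 *         reported as errors
 *
 * A minimal usage sketch, assuming a caller that has already written the
 * inode's data and metadata back and only needs its revokes issued:
 *
 *	gfs2_ail_flush(ip->i_gl, true);
 */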
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}
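
/**
 * gfs2_rgrp_go_dump - print information about a resource group
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */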
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}
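
/**
 * gfs2_glock2inode - safely get at a glock's inode
 * @gl: the glock
 *
 * Marks the inode with GIF_GLOP_PENDING under gl_lockref.lock so that it
 * cannot go away while a glock operation is using it.  Must be paired
 * with a call to gfs2_clear_glop_pending().
 *
 * Returns: the attached gfs2_inode, or NULL
 */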
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}
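
/**
 * gfs2_glock2rgrp - safely get at a glock's resource group
 * @gl: the glock
 *
 * Returns: the attached gfs2_rgrpd, or NULL
 */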
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}
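
/**
 * gfs2_clear_glop_pending - signal that a glock operation has finished
 * @ip: the inode (may be NULL)
 *
 * Clears GIF_GLOP_PENDING and wakes up any waiters.
 */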
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}
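
/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Byte-swaps the on-disk fields into the incore inode, sanity checking
 * the block address, inode type, height, depth and stuffed size as it
 * goes.
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */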
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&inode->i_atime, &atime) < 0)
		inode->i_atime = atime;
	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}
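
/**
 * inode_go_held - called when a holder of an inode glock is granted
 * @gh: the glock holder
 *
 * Waits for any direct I/O to drain (unless the holder is DEFERRED), and
 * resumes an interrupted truncate when the glock is re-acquired in EX.
 *
 * Returns: errno
 */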
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(inode->i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode),
		  inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()).  But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
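
/*
 * The glock operation tables, one per lock type; gfs2_glops_list at the
 * bottom maps each LM_TYPE_* number to its table.
 */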
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};