/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
        Opt_jid,
        Opt_id,
        Opt_first,
        Opt_nodir,
        Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED        0x00
#define LM_TYPE_NONDISK         0x01
#define LM_TYPE_INODE           0x02
#define LM_TYPE_RGRP            0x03
#define LM_TYPE_META            0x04
#define LM_TYPE_IOPEN           0x05
#define LM_TYPE_FLOCK           0x06
#define LM_TYPE_PLOCK           0x07
#define LM_TYPE_QUOTA           0x08
#define LM_TYPE_JOURNAL         0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED          0
#define LM_ST_EXCLUSIVE         1
#define LM_ST_DEFERRED          2
#define LM_ST_SHARED            3
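
/*
 * For quick reference, a sketch of the full compatibility matrix implied by
 * the notes above (UNLOCKED conflicts with nothing, EXCLUSIVE with every
 * other granted state); this table is editorial, not part of the interface:
 *
 *                      UNLOCKED  SHARED  DEFERRED  EXCLUSIVE
 *      UNLOCKED        yes       yes     yes       yes
 *      SHARED          yes       yes     no        no
 *      DEFERRED        yes       no      yes       no
 *      EXCLUSIVE       yes       no      no        no
 */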

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by recovery the way ordinary
 * locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * requester had acquired and released the lock.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node.  In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY             0x0001
#define LM_FLAG_TRY_1CB         0x0002
#define LM_FLAG_NOEXP           0x0004
#define LM_FLAG_ANY             0x0008
#define LM_FLAG_PRIORITY        0x0010
#define LM_FLAG_NODE_SCOPE      0x0020
#define GL_ASYNC                0x0040
#define GL_EXACT                0x0080
#define GL_SKIP                 0x0100
#define GL_NOPID                0x0200
#define GL_NOCACHE              0x0400
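
/*
 * Illustrative sketch (editorial, not part of the interface): a try-lock
 * using LM_FLAG_TRY together with gfs2_glock_nq_init() and GLR_TRYFAILED,
 * both declared further down in this header.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      error = gfs2_glock_nq_init(gl, LM_ST_SHARED,
 *                                 LM_FLAG_TRY | GL_SKIP, &gh);
 *      if (error == GLR_TRYFAILED)
 *              return 0;       // lock busy, try again later
 *      if (error)
 *              return error;
 *      // ... read the object protected by gl ...
 *      gfs2_glock_dq_uninit(&gh);
 */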

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */

#define LM_OUT_ST_MASK          0x00000003
#define LM_OUT_CANCELED         0x00000008
#define LM_OUT_ERROR            0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP            308
#define LM_RD_SUCCESS           309

#define GLR_TRYFAILED           13

#define GL_GLOCK_MAX_HOLD       (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD       (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD       (long)(10)
#define GL_GLOCK_HOLD_INCR      (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR      (long)(HZ / 40)

struct lm_lockops {
        const char *lm_proto_name;
        int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
        void (*lm_first_done) (struct gfs2_sbd *sdp);
        void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
                                    unsigned int result);
        void (*lm_unmount) (struct gfs2_sbd *sdp);
        void (*lm_withdraw) (struct gfs2_sbd *sdp);
        void (*lm_put_lock) (struct gfs2_glock *gl);
        int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
                        unsigned int flags);
        void (*lm_cancel) (struct gfs2_glock *gl);
        const match_table_t *lm_tokens;
};
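
/*
 * Illustrative sketch (editorial, not part of the interface): the shape of a
 * lock module that plugs into GFS2 through lm_lockops.  gfs2_dlm_ops,
 * declared further down, is the real DLM-backed implementation; every name
 * below (mylock_*) is hypothetical.
 *
 *      static int mylock_mount(struct gfs2_sbd *sdp, const char *table)
 *      {
 *              // parse the hostdata options (Opt_jid, Opt_id, ...) using
 *              // lm_tokens, join the lockspace named by @table, and record
 *              // the local journal id before returning
 *              return 0;
 *      }
 *
 *      static const struct lm_lockops mylock_ops = {
 *              .lm_proto_name  = "lock_mylock",
 *              .lm_mount       = mylock_mount,
 *              .lm_lock        = mylock_lock,
 *              .lm_put_lock    = mylock_put_lock,
 *              .lm_cancel      = mylock_cancel,
 *              .lm_unmount     = mylock_unmount,
 *              .lm_tokens      = &mylock_tokens,
 *      };
 */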

struct gfs2_glock_aspace {
        struct gfs2_glock glock;
        struct address_space mapping;
};

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        struct pid *pid;

        /* Look in glock's list of holders for one with current task as owner */
        spin_lock(&gl->gl_lockref.lock);
        pid = task_pid(current);
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        break;
                if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
                        continue;
                if (gh->gh_owner_pid == pid)
                        goto out;
        }
        gh = NULL;
out:
        spin_unlock(&gl->gl_lockref.lock);

        return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
                struct gfs2_glock_aspace *gla =
                        container_of(gl, struct gfs2_glock_aspace, glock);
                return &gla->mapping;
        }
        return NULL;
}
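
/*
 * Illustrative sketch (editorial, not part of the interface): glock types
 * that set GLOF_ASPACE embed the glock in a struct gfs2_glock_aspace, so the
 * page cache backing that glock can be reached from the glock itself.  A
 * typical (assumed) use when dropping cached data:
 *
 *      struct address_space *mapping = gfs2_glock2aspace(gl);
 *
 *      if (mapping)
 *              truncate_inode_pages(mapping, 0);
 */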

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                          const struct gfs2_glock_operations *glops,
                          int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);

extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                               u16 flags, struct gfs2_holder *gh,
                               unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                                    u16 flags, struct gfs2_holder *gh) {
        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

extern void gfs2_holder_reinit(unsigned int state, u16 flags,
                               struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                             const struct gfs2_glock_operations *glops,
                             unsigned int state, u16 flags,
                             struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
                            bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {              \
                        gfs2_dump_glock(NULL, gl, true);        \
                        BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {        \
                        gfs2_dump_glock(NULL, gl, true);                \
                        gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {    \
                        gfs2_dump_glock(NULL, gl, true);                \
                        gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)

extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
                                     unsigned int state, u16 flags,
                                     struct gfs2_holder *gh)
{
        int error;

        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}
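
/*
 * Illustrative sketch (editorial, not part of the interface): the usual
 * acquire/release pairing for gfs2_glock_nq_init(), where ip is assumed to
 * be a struct gfs2_inode and ip->i_gl its inode glock.  On success the
 * holder is queued and granted; gfs2_glock_dq_uninit() drops and
 * uninitializes it in one call.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 *      if (error)
 *              return error;
 *      // ... modify the inode under the exclusive glock ...
 *      gfs2_glock_dq_uninit(&gh);
 */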

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
        gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
        return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
        return !list_empty(&gh->gh_list);
}

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
        spin_lock(&gl->gl_lockref.lock);
        if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
                gfs2_dump_glock(NULL, gl, true);
        gl->gl_object = object;
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *      else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *              gfs2_dump_glock(NULL, gl, true);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode.  If we clear the
 * new inode's gl_object, we'll introduce metadata corruption.  Function
 * gfs2_delete_inode calls clear_inode which calls gfs2_clear_inode which also
 * tries to clear gl_object, so it's more than just gfs2_delete_inode.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_object == object)
                gl->gl_object = NULL;
        spin_unlock(&gl->gl_lockref.lock);
}
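
/*
 * Illustrative sketch (editorial, not part of the interface): gl_object
 * links a glock back to the object it protects, and the two helpers above
 * are normally paired around that object's lifetime:
 *
 *      glock_set_object(ip->i_gl, ip);         // when the inode is set up
 *      // ... inode in use ...
 *      glock_clear_object(ip->i_gl, ip);       // before the inode goes away
 *
 * glock_clear_object() only clears gl_object when it still points at the
 * given object, which is what keeps the delete/recreate race described
 * above from breaking the new inode's association.
 */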

static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_lockref.lock);
        set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
        spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
        spin_unlock(&gl->gl_lockref.lock);
}
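
/*
 * Illustrative sketch (editorial, not part of the interface): a holder can
 * mark itself demotable while it blocks on something unrelated (for example,
 * faulting in user pages) and must recheck its state afterwards.  The
 * control flow below is an assumption built from these helpers together with
 * gfs2_holder_queued() and gfs2_glock_nq() above:
 *
 *      gfs2_holder_allow_demote(gh);
 *      // ... sleep or fault in pages without holding up other nodes ...
 *      gfs2_holder_disallow_demote(gh);
 *      if (!gfs2_holder_queued(gh)) {
 *              // the glock was demoted away; queue the holder again
 *              error = gfs2_glock_nq(gh);
 *      }
 */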

extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */