#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include <linux/ktime.h>
#include "incore.h"
#include "glock.h"

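/*
 * String translation for the trace output: glock_trace_name() maps DLM
 * lock modes to their names, block_state_name() maps resource group
 * block states to readable labels, and show_glock_flags() prints the
 * glock flag bits as single-character codes (intended to match the
 * codes shown in the glock debugfs file).
 */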
#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x, \
	dlm_state_name(IV), \
	dlm_state_name(NL), \
	dlm_state_name(CR), \
	dlm_state_name(CW), \
	dlm_state_name(PR), \
	dlm_state_name(PW), \
	dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x, \
	{ GFS2_BLKST_FREE, "free" }, \
	{ GFS2_BLKST_USED, "used" }, \
	{ GFS2_BLKST_DINODE, "dinode" }, \
	{ GFS2_BLKST_UNLINKED, "unlinked" })

#define show_glock_flags(flags) __print_flags(flags, "", \
	{(1UL << GLF_LOCK), "l" }, \
	{(1UL << GLF_DEMOTE), "D" }, \
	{(1UL << GLF_PENDING_DEMOTE), "d" }, \
	{(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
	{(1UL << GLF_DIRTY), "y" }, \
	{(1UL << GLF_LFLUSH), "f" }, \
	{(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
	{(1UL << GLF_REPLY_PENDING), "r" }, \
	{(1UL << GLF_INITIAL), "I" }, \
	{(1UL << GLF_FROZEN), "F" }, \
	{(1UL << GLF_QUEUED), "q" }, \
	{(1UL << GLF_LRU), "L" }, \
	{(1UL << GLF_OBJECT), "o" }, \
	{(1UL << GLF_BLOCKING), "b" })

#ifndef NUMPTY
#define NUMPTY
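/*
 * Map GFS2's internal lock states (LM_ST_*) onto the equivalent DLM
 * lock modes for tracing; any other state (including unlocked) is
 * reported as NL.
 */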
static inline u8 glock_trace_state(unsigned int state)
{
	switch (state) {
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	}
	return DLM_LOCK_NL;
}
#endif

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */
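/*
 * Usage sketch (the exact tracefs mount point depends on the kernel
 * configuration): once gfs2 is loaded, these events appear under the
 * standard tracing interface, e.g.
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/gfs2/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 */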

/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,

	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),

	TP_ARGS(gl, new_state),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, new_state )
		__field( u8, dmt_state )
		__field( u8, tgt_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->new_state = glock_trace_state(new_state);
		__entry->tgt_state = glock_trace_state(gl->gl_target);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->new_state),
		  glock_trace_name(__entry->tgt_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->gltype, (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(DLM_LOCK_IV),
		  show_glock_flags(__entry->flags))

);

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,

	TP_PROTO(const struct gfs2_glock *gl),

	TP_ARGS(gl),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, dmt_state )
		__field( unsigned long, flags )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),

	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))

);

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,

	TP_PROTO(const struct gfs2_holder *gh, int first),

	TP_ARGS(gh, first),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, first )
		__field( u8, state )
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->first = first;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu promote %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->first ? "first" : "other",
		  glock_trace_name(__entry->state))
);

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,

	TP_PROTO(const struct gfs2_holder *gh, int queue),

	TP_ARGS(gh, queue),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, queue )
		__field( u8, state )
	),

	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->queue = queue;
		__entry->state = glock_trace_state(gh->gh_state);
	),

	TP_printk("%u,%u glock %u:%llu %squeue %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->queue ? "" : "de",
		  glock_trace_name(__entry->state))
);

/* DLM sends a reply to GFS2 */
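/*
 * tdiff is the time taken by this particular reply; the remaining
 * values are the per-glock statistics kept in gl_stats: smoothed round
 * trip time and variance for non-blocking (srtt/srttvar) and blocking
 * (srttb/srttvarb) requests, smoothed inter-request time and variance
 * (sirt/sirtvar), and counts of DLM requests (dcnt) and holder queues
 * (qcnt).
 */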
TRACE_EVENT(gfs2_glock_lock_time,

	TP_PROTO(const struct gfs2_glock *gl, s64 tdiff),

	TP_ARGS(gl, tdiff),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, status )
		__field( char, flags )
		__field( s64, tdiff )
		__field( s64, srtt )
		__field( s64, srttvar )
		__field( s64, srttb )
		__field( s64, srttvarb )
		__field( s64, sirt )
		__field( s64, sirtvar )
		__field( s64, dcount )
		__field( s64, qcount )
	),

	TP_fast_assign(
		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->status = gl->gl_lksb.sb_status;
		__entry->flags = gl->gl_lksb.sb_flags;
		__entry->tdiff = tdiff;
		__entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
		__entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
		__entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
		__entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
		__entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
		__entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
		__entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
		__entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
	),

	TP_printk("%u,%u glock %d:%lld status:%d flags:%02x tdiff:%lld srtt:%lld/%lld srttb:%lld/%lld sirt:%lld/%lld dcnt:%lld qcnt:%lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->status, __entry->flags,
		  (long long)__entry->tdiff,
		  (long long)__entry->srtt,
		  (long long)__entry->srttvar,
		  (long long)__entry->srttb,
		  (long long)__entry->srttvarb,
		  (long long)__entry->sirt,
		  (long long)__entry->sirtvar,
		  (long long)__entry->dcount,
		  (long long)__entry->qcount)
);

/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */
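/*
 * Note: flush latency can be measured by pairing gfs2_log_flush start
 * and end events, and gfs2_log_blocks reports the change in the log
 * block reservation (negative deltas correspond to blocks being
 * released).
 */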

/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,

	TP_PROTO(const struct gfs2_bufdata *bd, int pin),

	TP_ARGS(bd, pin),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, pin )
		__field( u32, len )
		__field( sector_t, block )
		__field( u64, ino )
	),

	TP_fast_assign(
		__entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
		__entry->pin = pin;
		__entry->len = bd->bd_bh->b_size;
		__entry->block = bd->bd_bh->b_blocknr;
		__entry->ino = bd->bd_gl->gl_name.ln_number;
	),

	TP_printk("%u,%u log %s %llu/%lu inode %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->pin ? "pin" : "unpin",
		  (unsigned long long)__entry->block,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->ino)
);

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, int start),

	TP_ARGS(sdp, start),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( u64, log_seq )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->log_seq = sdp->sd_log_sequence;
	),

	TP_printk("%u,%u log flush %s %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->start ? "start" : "end",
		  (unsigned long long)__entry->log_seq)
);

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,

	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),

	TP_ARGS(sdp, blocks),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, blocks )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->blocks = blocks;
	),

	TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->blocks)
);

/* Writing back the AIL */
TRACE_EVENT(gfs2_ail_flush,

	TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),

	TP_ARGS(sdp, wbc, start),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( int, sync_mode )
		__field( long, nr_to_write )
	),

	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->sync_mode = wbc->sync_mode;
		__entry->nr_to_write = wbc->nr_to_write;
	),

	TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->start ? "start" : "end",
		  __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
		  __entry->nr_to_write)
);

/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */
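/*
 * Note: gfs2_bmap records each logical to physical mapping request
 * together with the buffer state flags, the create flag and the return
 * code; gfs2_block_alloc records the resulting block state changes as
 * extents are allocated and freed.
 */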

/* Map an extent of blocks, possibly a new allocation */
TRACE_EVENT(gfs2_bmap,

	TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
		 sector_t lblock, int create, int errno),

	TP_ARGS(ip, bh, lblock, create, errno),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, lblock )
		__field( sector_t, pblock )
		__field( u64, inum )
		__field( unsigned long, state )
		__field( u32, len )
		__field( int, create )
		__field( int, errno )
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
		__entry->lblock = lblock;
		__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
		__entry->inum = ip->i_no_addr;
		__entry->state = bh->b_state;
		__entry->len = bh->b_size;
		__entry->create = create;
		__entry->errno = errno;
	),

	TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->lblock,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->pblock,
		  __entry->state, __entry->create ? "create " : "nocreate",
		  __entry->errno)
);

/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,

	TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
		 u8 block_state),

	TP_ARGS(ip, block, len, block_state),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, start )
		__field( u64, inum )
		__field( u32, len )
		__field( u8, block_state )
	),

	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
		__entry->start = block;
		__entry->inum = ip->i_no_addr;
		__entry->len = len;
		__entry->block_state = block_state;
	),

	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long)__entry->len,
		  block_state_name(__entry->block_state))
);

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
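/*
 * Setting TRACE_INCLUDE_PATH to "." makes define_trace.h re-read this
 * header from the gfs2 source directory (rather than the default
 * include/trace/events/) when it generates the tracepoint code.
 */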
#include <trace/define_trace.h>