/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events: that is, when all events share the same
 * parameters and differ only in their tracepoints.
 * Each tracepoint can then be defined with DEFINE_EVENT, which
 * maps the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
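
/*
 * As an illustration only (this event is hypothetical, not part of the
 * tree), a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		TP_STRUCT__entry(
 *			__string(name, name)
 *			__field(int, value)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, name);
 *			__entry->value = value;
 *		),
 *		TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 *	);
 *
 * would, with the stage 1 macros below, expand to:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		u32			__data_loc_name;
 *		int			value;
 *		char			__data[0];
 *	};
 *
 *	static struct ftrace_event_class event_class_foo_bar;
 */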


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),			\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))		\

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)					\
	__TRACE_EVENT_FLAGS(name, value)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; it holds
 * the offset of the corresponding dynamic array from the beginning of
 * the event. The size of the array is encoded in the upper 16 bits
 * of <item>.
 */
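
/*
 * Continuing the hypothetical foo_bar example from stage 1 (illustrative
 * only), the macros below would generate:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * where "name" holds the offset of the string data and, in its upper
 * 16 bits, the length of that data.
 */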

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
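
/*
 * For example (illustrative use, not from this file), a TP_printk() of
 *
 *	TP_printk("state=%s", __print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }))
 *
 * expands here to a call to ftrace_print_symbols_seq() with a local
 * trace_print_flags table terminated by { -1, NULL }. __print_flags()
 * likewise maps onto ftrace_print_flags_seq(), with a delimiter printed
 * between the set flags.
 */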

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		mutex_lock(&event_storage_mutex);			\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		snprintf(event_storage, sizeof(event_storage),		\
			 "%s[%d]", #type, len);				\
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		mutex_unlock(&event_storage_mutex);			\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}
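
/*
 * For the hypothetical foo_bar event above (illustration only), this
 * stage would emit ftrace_define_fields_foo_bar() containing:
 *
 *	ret = trace_define_field(event_call, "__data_loc char[]", "name",
 *				 offsetof(typeof(field), __data_loc_name),
 *				 sizeof(field.__data_loc_name),
 *				 is_signed_type(char), FILTER_OTHER);
 *	ret = trace_define_field(event_call, "int", "value",
 *				 offsetof(typeof(field), value),
 *				 sizeof(field.value),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */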

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
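
/*
 * For instance (illustrative), a __string(name, name) where
 * strlen(name) + 1 == 4 yields, for the first dynamic array of the
 * event:
 *
 *	__data_offsets->name = 0 + offsetof(typeof(*entry), __data);
 *	__data_offsets->name |= (4 * sizeof(char)) << 16;
 *	__data_size += 4 * sizeof(char);
 *
 * i.e. the lower 16 bits locate the data relative to the start of the
 * entry and the upper 16 bits carry its length.
 */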

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_call->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_nowake_buffer_unlock_commit(buffer,
 *						  event, irq_flags, pc);
 * }
 *
 * static struct trace_event_functions ftrace_event_type_funcs_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_<call>.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_<call>,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.name			= "<call>",
 *	.class			= &event_class_<template>,
 *	.event.funcs		= &ftrace_event_type_funcs_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
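
/*
 * So an __assign_str(name, name) in TP_fast_assign() (as in the
 * hypothetical foo_bar example above) becomes a strcpy() into the
 * dynamic area located by __get_str(): the offset was computed by
 * ftrace_get_offsets_<call>() and stored in __entry->__data_loc_name
 * by the __dynamic_array() assignment just above.
 */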

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
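
/*
 * For the hypothetical foo_bar event (illustration only), the TP_printk()
 * given earlier would now be recorded as
 *
 *	static const char print_fmt_foo_bar[] =
 *		"\"name=%s value=%d\", __get_str(name), REC->value";
 *
 * which is the string exported to user space in the event's format file.
 */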

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback for perf events.
 *
 * The job is very similar to ftrace_raw_event_<call>, except that we
 * don't insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 * 	// Avoid recursion from perf that could mess up the buffer
 * 	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 * 	raw_data = trace_buf->buf;
 *
 *	// Make the recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid leaking stack contents
 *	// to user space:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- set up the dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		     __entry_size);  <- submit them to the perf counter
 *
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)
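
/*
 * __perf_addr() and __perf_count() simply override the __addr = 0 and
 * __count = 1 defaults declared in perf_trace_<call>() below; whatever
 * they set is what perf_trace_buf_submit() receives.
 */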

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT