/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

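/*
 * stack_dump_trace holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX entries.  For each address,
 * stack_dump_index records how deep (in bytes, measured from the top
 * of the thread stack) that entry was found.
 */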
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

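/*
 * Measure how much of the current thread stack is in use.  If it
 * exceeds the deepest usage recorded so far, save a new max stack
 * trace and compute how much stack each function in it consumes.
 */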
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

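	/*
	 * The address of a local variable approximates the current
	 * stack pointer.  Thread stacks are THREAD_SIZE aligned, so
	 * masking gives the offset into the stack region, and
	 * subtracting from THREAD_SIZE gives the bytes in use.
	 */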
	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may for some
	 * reason not be found on the stack, so we have to account
	 * for that. If every entry is found, this outer loop runs
	 * only once. This code only executes on a new max, so it
	 * is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

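/*
 * ftrace callback invoked on function entry.  The per-cpu
 * trace_active count acts as a recursion guard: functions called
 * from check_stack() itself hit this callback again, but bail out
 * instead of recursing.
 */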
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed; this variable is only modified on this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

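/* debugfs read handler: report the deepest stack usage recorded */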
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

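/*
 * debugfs write handler: let user space overwrite the recorded
 * maximum, typically by writing 0 to re-arm the tracer.
 */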
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;
	int cpu;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);

	/*
	 * If we trace inside arch_spin_lock() (or from an NMI taken
	 * while holding it), we would deadlock on the lock, so the
	 * percpu trace_active count must be raised here as well.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

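/*
 * seq_file iterator over the saved max stack trace.  Position 0 is
 * the header (SEQ_START_TOKEN), so entry indices are offset by one.
 */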
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

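/*
 * Reading the trace happens under max_stack_lock with interrupts
 * disabled and trace_active raised, so that check_stack() cannot
 * take the same lock on this cpu and deadlock.
 */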
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

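	/*
	 * The stack space used by entry i is the difference between
	 * its recorded depth and that of the next entry; the last
	 * entry is charged its full depth.
	 */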
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

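/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled:
 * register or unregister the ftrace callback only when the value
 * actually changes.
 */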
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

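/* "stacktrace" on the kernel command line enables the tracer at boot */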
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

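/*
 * Example usage from user space (a sketch; paths assume debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/debug/tracing/stack_trace
 *   cat /sys/kernel/debug/tracing/stack_max_size
 *   echo 0 > /sys/kernel/debug/tracing/stack_max_size   (reset the max)
 */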
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);