// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

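/*
 * Per-architecture libunwind backends.  These are weak definitions so the
 * build links even when a backend is not compiled in; a missing backend
 * leaves its pointer NULL, which unwind__prepare_access() checks for.
 */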
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

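/* Attach the chosen libunwind backend to this set of maps. */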
static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}

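/*
 * Choose a libunwind backend for these maps based on the target
 * architecture and the bitness of the DSO, then let the backend set up
 * its unwind address space.  A no-op unless DWARF callchains were
 * requested.
 */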
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
	const char *arch;
	enum dso_type dso_type;
	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
	struct dso *dso = map__dso(map);
	struct machine *machine;
	int err;

	if (!dwarf_callchain_users)
		return 0;

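	/* Backend already set up for these maps; nothing more to do. */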
	if (maps__addr_space(maps)) {
		pr_debug("unwind: thread map already set, dso=%s\n", dso->name);
		if (initialized)
			*initialized = true;
		return 0;
	}

	machine = maps__machine(maps);
	/* env->arch is NULL for live-mode (i.e. perf top) */
	if (!machine->env || !machine->env->arch)
		goto out_register;

	dso_type = dso__type(dso, machine);
	if (dso_type == DSO__TYPE_UNKNOWN)
		return 0;

	arch = perf_env__arch(machine->env);

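	/*
	 * Default to the local backend; switch to a remote one when the
	 * DSO's bitness calls for it (32-bit DSOs on x86, 64-bit DSOs on
	 * arm/arm64).
	 */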
	if (!strcmp(arch, "x86")) {
		if (dso_type != DSO__TYPE_64BIT)
			ops = x86_32_unwind_libunwind_ops;
	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
		if (dso_type == DSO__TYPE_64BIT)
			ops = arm64_unwind_libunwind_ops;
	}

	if (!ops) {
		pr_warning_once("unwind: target platform=%s is not supported\n", arch);
		return 0;
	}
out_register:
	unwind__register_ops(maps, ops);

	err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
	if (initialized)
		*initialized = err ? false : true;
	return err;
}

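/* Flush any unwind state the backend has cached for these maps. */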
void unwind__flush_access(struct maps *maps)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);

	if (ops)
		ops->flush_access(maps);
}

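/* Release the resources the backend allocated in prepare_access(). */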
void unwind__finish_access(struct maps *maps)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);

	if (ops)
		ops->finish_access(maps);
}

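/*
 * Walk the sampled stack, calling @cb for each frame up to @max_stack
 * entries.  Returns 0 when no backend is registered for the thread.
 */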
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			 struct thread *thread,
			 struct perf_sample *data, int max_stack,
			 bool best_effort)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread__maps(thread));

	if (ops)
		return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
	return 0;
}