/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
5 #include <linux/kernel.h>
6 #include <linux/export.h>
7 #include <linux/init.h>
8 #include <linux/irq.h>
9 
10 #include <linux/irq_work.h>
11 #include <linux/ftrace.h>
12 
13 #include <asm/pil.h>
14 #include <asm/pcr.h>
15 #include <asm/nmi.h>
16 #include <asm/spitfire.h>
17 #include <asm/perfctr.h>
18 
/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, the pseudo-NMI watchdog, and the
 * perf_event support layer.
 */
23 
/* %pcr enable value for sun4u chips with direct privileged access. */
#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
/* %pcr enable value for Niagara-2 style chips: same priv/trace bits
 * plus TOE_OV1 and an SL1/MASK1 event selection (presumably
 * trap-on-overflow and event select for counter 1 — confirm against
 * the N2 PRM).
 */
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))

/* Per-cpu-type values selected in pcr_arch_init() for the perf users. */
u64 pcr_enable;
unsigned int picl_shift;
32 
/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
/* Handler for the PIL_DEFERRED_PCR_WORK software interrupt.  The
 * performance counter interrupt proper runs at PIL 15 (see comment
 * above), so wakeup-style work is deferred to this lower-priority
 * softint, which runs pending irq_work inside a normal irq context.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/* Clear the pending softint before running the work. */
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
54 
/* irq_work arch hook: raise the deferred-work softint so that
 * deferred_pcr_work_irq() runs once the cpu's PIL allows it.
 */
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
59 
/* Active %pcr access methods, selected in pcr_arch_init(). */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
62 
direct_pcr_read(void)63 static u64 direct_pcr_read(void)
64 {
65 	u64 val;
66 
67 	read_pcr(val);
68 	return val;
69 }
70 
/* Write %pcr directly; used where no hypervisor mediation is needed. */
static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}
75 
/* %pcr access via direct privileged register reads and writes. */
static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};
80 
/* Niagara-2 %pcr write.  Values that request hypervisor-mode tracing
 * (PCR_N2_HTRACE) must go through the sun4v perf service; everything
 * else — including a failed hypervisor call — is written directly.
 */
static void n2_pcr_write(u64 val)
{
	if ((val & PCR_N2_HTRACE) &&
	    sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val) == HV_EOK)
		return;

	write_pcr(val);
}
92 
/* Niagara-2 %pcr access: direct reads, hypervisor-aware writes. */
static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};
97 
/* Hypervisor API group and negotiated version, set up by
 * register_perf_hsvc() and torn down by unregister_perf_hsvc().
 */
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
101 
register_perf_hsvc(void)102 static int __init register_perf_hsvc(void)
103 {
104 	if (tlb_type == hypervisor) {
105 		switch (sun4v_chip_type) {
106 		case SUN4V_CHIP_NIAGARA1:
107 			perf_hsvc_group = HV_GRP_NIAG_PERF;
108 			break;
109 
110 		case SUN4V_CHIP_NIAGARA2:
111 			perf_hsvc_group = HV_GRP_N2_CPU;
112 			break;
113 
114 		case SUN4V_CHIP_NIAGARA3:
115 			perf_hsvc_group = HV_GRP_KT_CPU;
116 			break;
117 
118 		default:
119 			return -ENODEV;
120 		}
121 
122 
123 		perf_hsvc_major = 1;
124 		perf_hsvc_minor = 0;
125 		if (sun4v_hvapi_register(perf_hsvc_group,
126 					 perf_hsvc_major,
127 					 &perf_hsvc_minor)) {
128 			printk("perfmon: Could not register hvapi.\n");
129 			return -ENODEV;
130 		}
131 	}
132 	return 0;
133 }
134 
unregister_perf_hsvc(void)135 static void __init unregister_perf_hsvc(void)
136 {
137 	if (tlb_type != hypervisor)
138 		return;
139 	sun4v_hvapi_unregister(perf_hsvc_group);
140 }
141 
/* Boot-time setup: pick the %pcr access method for this cpu type,
 * record the matching enable value and PIC shift, then initialize the
 * NMI watchdog layer.
 *
 * Returns 0 on success; otherwise the error from register_perf_hsvc()
 * or nmi_init(), or -ENODEV for cpus whose counters we cannot use.
 */
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		/* sun4v: writes may need to trap into the hypervisor. */
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;

	case cheetah:
	case cheetah_plus:
		/* sun4u: direct privileged %pcr access. */
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	/* Undo the hypervisor API registration on the error path. */
	unregister_perf_hsvc();
	return err;
}
179