1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 #include "coresight-syscfg.h"
13 
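/*
 * Configure the ViewInst include/exclude control (the TRCVIIECTLR mirror
 * in config->viiectlr) for the address range comparator pair currently
 * selected by config->addr_idx.  Only valid when the selected comparator
 * performs instruction address comparison and both halves of the pair are
 * programmed as an address range.
 */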
14 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
15 {
16 	u8 idx;
17 	struct etmv4_config *config = &drvdata->config;
18 
19 	idx = config->addr_idx;
20 
21 	/*
22 	 * TRCACATRn.TYPE bit[1:0]: type of comparison
23 	 * the trace unit performs
24 	 */
25 	if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
26 		if (idx % 2 != 0)
27 			return -EINVAL;
28 
29 		/*
30 		 * We are performing instruction address comparison. Set the
31 		 * relevant bit of ViewInst Include/Exclude Control register
32 		 * for corresponding address comparator pair.
33 		 */
34 		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
35 		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
36 			return -EINVAL;
37 
38 		if (exclude == true) {
39 			/*
40 			 * Set exclude bit and unset the include bit
41 			 * corresponding to comparator pair
42 			 */
43 			config->viiectlr |= BIT(idx / 2 + 16);
44 			config->viiectlr &= ~BIT(idx / 2);
45 		} else {
46 			/*
47 			 * Set include bit and unset exclude bit
48 			 * corresponding to comparator pair
49 			 */
50 			config->viiectlr |= BIT(idx / 2);
51 			config->viiectlr &= ~BIT(idx / 2 + 16);
52 		}
53 	}
54 	return 0;
55 }
56 
57 static ssize_t nr_pe_cmp_show(struct device *dev,
58 			      struct device_attribute *attr,
59 			      char *buf)
60 {
61 	unsigned long val;
62 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63 
64 	val = drvdata->nr_pe_cmp;
65 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 }
67 static DEVICE_ATTR_RO(nr_pe_cmp);
68 
69 static ssize_t nr_addr_cmp_show(struct device *dev,
70 				struct device_attribute *attr,
71 				char *buf)
72 {
73 	unsigned long val;
74 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75 
76 	val = drvdata->nr_addr_cmp;
77 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 }
79 static DEVICE_ATTR_RO(nr_addr_cmp);
80 
81 static ssize_t nr_cntr_show(struct device *dev,
82 			    struct device_attribute *attr,
83 			    char *buf)
84 {
85 	unsigned long val;
86 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87 
88 	val = drvdata->nr_cntr;
89 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 }
91 static DEVICE_ATTR_RO(nr_cntr);
92 
93 static ssize_t nr_ext_inp_show(struct device *dev,
94 			       struct device_attribute *attr,
95 			       char *buf)
96 {
97 	unsigned long val;
98 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99 
100 	val = drvdata->nr_ext_inp;
101 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 }
103 static DEVICE_ATTR_RO(nr_ext_inp);
104 
105 static ssize_t numcidc_show(struct device *dev,
106 			    struct device_attribute *attr,
107 			    char *buf)
108 {
109 	unsigned long val;
110 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111 
112 	val = drvdata->numcidc;
113 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 }
115 static DEVICE_ATTR_RO(numcidc);
116 
117 static ssize_t numvmidc_show(struct device *dev,
118 			     struct device_attribute *attr,
119 			     char *buf)
120 {
121 	unsigned long val;
122 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123 
124 	val = drvdata->numvmidc;
125 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 }
127 static DEVICE_ATTR_RO(numvmidc);
128 
129 static ssize_t nrseqstate_show(struct device *dev,
130 			       struct device_attribute *attr,
131 			       char *buf)
132 {
133 	unsigned long val;
134 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135 
136 	val = drvdata->nrseqstate;
137 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 }
139 static DEVICE_ATTR_RO(nrseqstate);
140 
141 static ssize_t nr_resource_show(struct device *dev,
142 				struct device_attribute *attr,
143 				char *buf)
144 {
145 	unsigned long val;
146 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147 
148 	val = drvdata->nr_resource;
149 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 }
151 static DEVICE_ATTR_RO(nr_resource);
152 
153 static ssize_t nr_ss_cmp_show(struct device *dev,
154 			      struct device_attribute *attr,
155 			      char *buf)
156 {
157 	unsigned long val;
158 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 
160 	val = drvdata->nr_ss_cmp;
161 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 }
163 static DEVICE_ATTR_RO(nr_ss_cmp);
164 
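/*
 * 'reset' sysfs handler: any write restores the default configuration -
 * data tracing off, events, counters, sequencer state and comparators
 * cleared, and ViewInst set to trace everything with the start-stop logic
 * in the started state.  A non-zero value additionally clears the 'mode'
 * word.  Typically driven from user space with "echo 1 > reset" in the
 * device's sysfs directory.
 */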
165 static ssize_t reset_store(struct device *dev,
166 			   struct device_attribute *attr,
167 			   const char *buf, size_t size)
168 {
169 	int i;
170 	unsigned long val;
171 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
172 	struct etmv4_config *config = &drvdata->config;
173 
174 	if (kstrtoul(buf, 16, &val))
175 		return -EINVAL;
176 
177 	spin_lock(&drvdata->spinlock);
178 	if (val)
179 		config->mode = 0x0;
180 
181 	/* Disable data tracing: do not trace load and store data transfers */
182 	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
183 	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
184 
185 	/* Disable data value and data address tracing */
186 	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
187 			   ETM_MODE_DATA_TRACE_VAL);
188 	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
189 
190 	/* Disable all events tracing */
191 	config->eventctrl0 = 0x0;
192 	config->eventctrl1 = 0x0;
193 
194 	/* Disable timestamp event */
195 	config->ts_ctrl = 0x0;
196 
197 	/* Disable stalling */
198 	config->stall_ctrl = 0x0;
199 
200 	/* Reset trace synchronization period to 2^8 = 256 bytes */
201 	if (drvdata->syncpr == false)
202 		config->syncfreq = 0x8;
203 
204 	/*
205 	 * Enable ViewInst to trace everything with start-stop logic in
206 	 * started state. ARM recommends start-stop logic is set before
207 	 * each trace run.
208 	 */
209 	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
210 	if (drvdata->nr_addr_cmp > 0) {
211 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
212 		/* SSSTATUS, bit[9] */
213 		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
214 	}
215 
216 	/* No address range filtering for ViewInst */
217 	config->viiectlr = 0x0;
218 
219 	/* No start-stop filtering for ViewInst */
220 	config->vissctlr = 0x0;
221 	config->vipcssctlr = 0x0;
222 
223 	/* Disable seq events */
224 	for (i = 0; i < drvdata->nrseqstate-1; i++)
225 		config->seq_ctrl[i] = 0x0;
226 	config->seq_rst = 0x0;
227 	config->seq_state = 0x0;
228 
229 	/* Disable external input events */
230 	config->ext_inp = 0x0;
231 
232 	config->cntr_idx = 0x0;
233 	for (i = 0; i < drvdata->nr_cntr; i++) {
234 		config->cntrldvr[i] = 0x0;
235 		config->cntr_ctrl[i] = 0x0;
236 		config->cntr_val[i] = 0x0;
237 	}
238 
239 	config->res_idx = 0x0;
240 	for (i = 2; i < 2 * drvdata->nr_resource; i++)
241 		config->res_ctrl[i] = 0x0;
242 
243 	config->ss_idx = 0x0;
244 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
245 		config->ss_ctrl[i] = 0x0;
246 		config->ss_pe_cmp[i] = 0x0;
247 	}
248 
249 	config->addr_idx = 0x0;
250 	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
251 		config->addr_val[i] = 0x0;
252 		config->addr_acc[i] = 0x0;
253 		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
254 	}
255 
256 	config->ctxid_idx = 0x0;
257 	for (i = 0; i < drvdata->numcidc; i++)
258 		config->ctxid_pid[i] = 0x0;
259 
260 	config->ctxid_mask0 = 0x0;
261 	config->ctxid_mask1 = 0x0;
262 
263 	config->vmid_idx = 0x0;
264 	for (i = 0; i < drvdata->numvmidc; i++)
265 		config->vmid_val[i] = 0x0;
266 	config->vmid_mask0 = 0x0;
267 	config->vmid_mask1 = 0x0;
268 
269 	drvdata->trcid = drvdata->cpu + 1;
270 
271 	spin_unlock(&drvdata->spinlock);
272 
273 	cscfg_csdev_reset_feats(to_coresight_device(dev));
274 
275 	return size;
276 }
277 static DEVICE_ATTR_WO(reset);
278 
279 static ssize_t mode_show(struct device *dev,
280 			 struct device_attribute *attr,
281 			 char *buf)
282 {
283 	unsigned long val;
284 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
285 	struct etmv4_config *config = &drvdata->config;
286 
287 	val = config->mode;
288 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
289 }
290 
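/*
 * 'mode' sysfs handler: map the user supplied ETM_MODE_* bits onto the
 * TRCCONFIGR, TRCEVENTCTL1R, TRCSTALLCTLR and TRCVICTLR mirrors in the
 * driver configuration, honouring only the features the trace unit
 * advertises (instrp0, trcbb, trccci, q_support, stallctl, ...).
 */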
291 static ssize_t mode_store(struct device *dev,
292 			  struct device_attribute *attr,
293 			  const char *buf, size_t size)
294 {
295 	unsigned long val, mode;
296 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
297 	struct etmv4_config *config = &drvdata->config;
298 
299 	if (kstrtoul(buf, 16, &val))
300 		return -EINVAL;
301 
302 	spin_lock(&drvdata->spinlock);
303 	config->mode = val & ETMv4_MODE_ALL;
304 
305 	if (drvdata->instrp0 == true) {
306 		/* start by clearing instruction P0 field */
307 		config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
308 		if (config->mode & ETM_MODE_LOAD)
309 			/* 0b01 Trace load instructions as P0 instructions */
310 			config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
311 		if (config->mode & ETM_MODE_STORE)
312 			/* 0b10 Trace store instructions as P0 instructions */
313 			config->cfg  |= TRCCONFIGR_INSTP0_STORE;
314 		if (config->mode & ETM_MODE_LOAD_STORE)
315 			/*
316 			 * 0b11 Trace load and store instructions
317 			 * as P0 instructions
318 			 */
319 			config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
320 	}
321 
322 	/* bit[3], Branch broadcast mode */
323 	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 		config->cfg |= TRCCONFIGR_BB;
325 	else
326 		config->cfg &= ~TRCCONFIGR_BB;
327 
328 	/* bit[4], Cycle counting instruction trace bit */
329 	if ((config->mode & ETMv4_MODE_CYCACC) &&
330 		(drvdata->trccci == true))
331 		config->cfg |= TRCCONFIGR_CCI;
332 	else
333 		config->cfg &= ~TRCCONFIGR_CCI;
334 
335 	/* bit[6], Context ID tracing bit */
336 	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 		config->cfg |= TRCCONFIGR_CID;
338 	else
339 		config->cfg &= ~TRCCONFIGR_CID;
340 
341 	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 		config->cfg |= TRCCONFIGR_VMID;
343 	else
344 		config->cfg &= ~TRCCONFIGR_VMID;
345 
346 	/* bits[10:8], Conditional instruction tracing bit */
347 	mode = ETM_MODE_COND(config->mode);
348 	if (drvdata->trccond == true) {
349 		config->cfg &= ~TRCCONFIGR_COND_MASK;
350 		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
351 	}
352 
353 	/* bit[11], Global timestamp tracing bit */
354 	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 		config->cfg |= TRCCONFIGR_TS;
356 	else
357 		config->cfg &= ~TRCCONFIGR_TS;
358 
359 	/* bit[12], Return stack enable bit */
360 	if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 					(drvdata->retstack == true))
362 		config->cfg |= TRCCONFIGR_RS;
363 	else
364 		config->cfg &= ~TRCCONFIGR_RS;
365 
366 	/* bits[14:13], Q element enable field */
367 	mode = ETM_MODE_QELEM(config->mode);
368 	/* start by clearing QE bits */
369 	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
370 	/*
371 	 * if supported, Q elements with instruction counts are enabled.
372 	 * Always set the low bit for any requested mode. Valid combos are
373 	 * 0b00, 0b01 and 0b11.
374 	 */
375 	if (mode && drvdata->q_support)
376 		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
377 	/*
378 	 * if supported, Q elements with and without instruction
379 	 * counts are enabled
380 	 */
381 	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
382 		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
383 
384 	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
385 	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
386 	    (drvdata->atbtrig == true))
387 		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
388 	else
389 		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
390 
391 	/* bit[12], Low-power state behavior override bit */
392 	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
393 	    (drvdata->lpoverride == true))
394 		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
395 	else
396 		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
397 
398 	/* bit[8], Instruction stall bit */
399 	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
400 		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
401 	else
402 		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
403 
404 	/* bit[10], Prioritize instruction trace bit */
405 	if (config->mode & ETM_MODE_INSTPRIO)
406 		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
407 	else
408 		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
409 
410 	/* bit[13], Trace overflow prevention bit */
411 	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
412 		(drvdata->nooverflow == true))
413 		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
414 	else
415 		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
416 
417 	/* bit[9] Start/stop logic control bit */
418 	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
419 		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
420 	else
421 		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
422 
423 	/* bit[10], Whether a trace unit must trace a Reset exception */
424 	if (config->mode & ETM_MODE_TRACE_RESET)
425 		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
426 	else
427 		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
428 
429 	/* bit[11], Whether a trace unit must trace a system error exception */
430 	if ((config->mode & ETM_MODE_TRACE_ERR) &&
431 		(drvdata->trc_error == true))
432 		config->vinst_ctrl |= TRCVICTLR_TRCERR;
433 	else
434 		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
435 
436 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
437 		etm4_config_trace_mode(config);
438 
439 	spin_unlock(&drvdata->spinlock);
440 
441 	return size;
442 }
443 static DEVICE_ATTR_RW(mode);
444 
445 static ssize_t pe_show(struct device *dev,
446 		       struct device_attribute *attr,
447 		       char *buf)
448 {
449 	unsigned long val;
450 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
451 	struct etmv4_config *config = &drvdata->config;
452 
453 	val = config->pe_sel;
454 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
455 }
456 
457 static ssize_t pe_store(struct device *dev,
458 			struct device_attribute *attr,
459 			const char *buf, size_t size)
460 {
461 	unsigned long val;
462 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
463 	struct etmv4_config *config = &drvdata->config;
464 
465 	if (kstrtoul(buf, 16, &val))
466 		return -EINVAL;
467 
468 	spin_lock(&drvdata->spinlock);
469 	if (val > drvdata->nr_pe) {
470 		spin_unlock(&drvdata->spinlock);
471 		return -EINVAL;
472 	}
473 
474 	config->pe_sel = val;
475 	spin_unlock(&drvdata->spinlock);
476 	return size;
477 }
478 static DEVICE_ATTR_RW(pe);
479 
480 static ssize_t event_show(struct device *dev,
481 			  struct device_attribute *attr,
482 			  char *buf)
483 {
484 	unsigned long val;
485 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
486 	struct etmv4_config *config = &drvdata->config;
487 
488 	val = config->eventctrl0;
489 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
490 }
491 
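/*
 * 'event' sysfs handler: program the TRCEVENTCTL0R mirror.  The number of
 * implemented event fields (drvdata->nr_event) determines how many of the
 * EVENT0..EVENT3 byte fields are kept; surplus bits are masked off.
 */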
492 static ssize_t event_store(struct device *dev,
493 			   struct device_attribute *attr,
494 			   const char *buf, size_t size)
495 {
496 	unsigned long val;
497 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
498 	struct etmv4_config *config = &drvdata->config;
499 
500 	if (kstrtoul(buf, 16, &val))
501 		return -EINVAL;
502 
503 	spin_lock(&drvdata->spinlock);
504 	switch (drvdata->nr_event) {
505 	case 0x0:
506 		/* EVENT0, bits[7:0] */
507 		config->eventctrl0 = val & 0xFF;
508 		break;
509 	case 0x1:
510 		 /* EVENT1, bits[15:8] */
511 		config->eventctrl0 = val & 0xFFFF;
512 		break;
513 	case 0x2:
514 		/* EVENT2, bits[23:16] */
515 		config->eventctrl0 = val & 0xFFFFFF;
516 		break;
517 	case 0x3:
518 		/* EVENT3, bits[31:24] */
519 		config->eventctrl0 = val;
520 		break;
521 	default:
522 		break;
523 	}
524 	spin_unlock(&drvdata->spinlock);
525 	return size;
526 }
527 static DEVICE_ATTR_RW(event);
528 
529 static ssize_t event_instren_show(struct device *dev,
530 				  struct device_attribute *attr,
531 				  char *buf)
532 {
533 	unsigned long val;
534 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
535 	struct etmv4_config *config = &drvdata->config;
536 
537 	val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
538 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
539 }
540 
541 static ssize_t event_instren_store(struct device *dev,
542 				   struct device_attribute *attr,
543 				   const char *buf, size_t size)
544 {
545 	unsigned long val;
546 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
547 	struct etmv4_config *config = &drvdata->config;
548 
549 	if (kstrtoul(buf, 16, &val))
550 		return -EINVAL;
551 
552 	spin_lock(&drvdata->spinlock);
553 	/* start by clearing all instruction event enable bits */
554 	config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
555 	switch (drvdata->nr_event) {
556 	case 0x0:
557 		/* generate Event element for event 1 */
558 		config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
559 		break;
560 	case 0x1:
561 		/* generate Event element for event 1 and 2 */
562 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
563 		break;
564 	case 0x2:
565 		/* generate Event element for event 1, 2 and 3 */
566 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
567 					     TRCEVENTCTL1R_INSTEN_1 |
568 					     TRCEVENTCTL1R_INSTEN_2);
569 		break;
570 	case 0x3:
571 		/* generate Event element for all 4 events */
572 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
573 					     TRCEVENTCTL1R_INSTEN_1 |
574 					     TRCEVENTCTL1R_INSTEN_2 |
575 					     TRCEVENTCTL1R_INSTEN_3);
576 		break;
577 	default:
578 		break;
579 	}
580 	spin_unlock(&drvdata->spinlock);
581 	return size;
582 }
583 static DEVICE_ATTR_RW(event_instren);
584 
585 static ssize_t event_ts_show(struct device *dev,
586 			     struct device_attribute *attr,
587 			     char *buf)
588 {
589 	unsigned long val;
590 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
591 	struct etmv4_config *config = &drvdata->config;
592 
593 	val = config->ts_ctrl;
594 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
595 }
596 
597 static ssize_t event_ts_store(struct device *dev,
598 			      struct device_attribute *attr,
599 			      const char *buf, size_t size)
600 {
601 	unsigned long val;
602 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
603 	struct etmv4_config *config = &drvdata->config;
604 
605 	if (kstrtoul(buf, 16, &val))
606 		return -EINVAL;
607 	if (!drvdata->ts_size)
608 		return -EINVAL;
609 
610 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
611 	return size;
612 }
613 static DEVICE_ATTR_RW(event_ts);
614 
615 static ssize_t syncfreq_show(struct device *dev,
616 			     struct device_attribute *attr,
617 			     char *buf)
618 {
619 	unsigned long val;
620 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
621 	struct etmv4_config *config = &drvdata->config;
622 
623 	val = config->syncfreq;
624 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
625 }
626 
627 static ssize_t syncfreq_store(struct device *dev,
628 			      struct device_attribute *attr,
629 			      const char *buf, size_t size)
630 {
631 	unsigned long val;
632 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
633 	struct etmv4_config *config = &drvdata->config;
634 
635 	if (kstrtoul(buf, 16, &val))
636 		return -EINVAL;
637 	if (drvdata->syncpr == true)
638 		return -EINVAL;
639 
640 	config->syncfreq = val & ETMv4_SYNC_MASK;
641 	return size;
642 }
643 static DEVICE_ATTR_RW(syncfreq);
644 
645 static ssize_t cyc_threshold_show(struct device *dev,
646 				  struct device_attribute *attr,
647 				  char *buf)
648 {
649 	unsigned long val;
650 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
651 	struct etmv4_config *config = &drvdata->config;
652 
653 	val = config->ccctlr;
654 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
655 }
656 
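/*
 * 'cyc_threshold' sysfs handler: the value is masked to the TRCCCCTLR
 * threshold field and must be at least drvdata->ccitmin, the minimum
 * cycle count threshold the trace unit supports.
 */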
657 static ssize_t cyc_threshold_store(struct device *dev,
658 				   struct device_attribute *attr,
659 				   const char *buf, size_t size)
660 {
661 	unsigned long val;
662 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
663 	struct etmv4_config *config = &drvdata->config;
664 
665 	if (kstrtoul(buf, 16, &val))
666 		return -EINVAL;
667 
668 	/* mask off max threshold before checking min value */
669 	val &= ETM_CYC_THRESHOLD_MASK;
670 	if (val < drvdata->ccitmin)
671 		return -EINVAL;
672 
673 	config->ccctlr = val;
674 	return size;
675 }
676 static DEVICE_ATTR_RW(cyc_threshold);
677 
678 static ssize_t bb_ctrl_show(struct device *dev,
679 			    struct device_attribute *attr,
680 			    char *buf)
681 {
682 	unsigned long val;
683 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 	struct etmv4_config *config = &drvdata->config;
685 
686 	val = config->bb_ctrl;
687 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
688 }
689 
690 static ssize_t bb_ctrl_store(struct device *dev,
691 			     struct device_attribute *attr,
692 			     const char *buf, size_t size)
693 {
694 	unsigned long val;
695 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
696 	struct etmv4_config *config = &drvdata->config;
697 
698 	if (kstrtoul(buf, 16, &val))
699 		return -EINVAL;
700 	if (drvdata->trcbb == false)
701 		return -EINVAL;
702 	if (!drvdata->nr_addr_cmp)
703 		return -EINVAL;
704 
705 	/*
706 	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
707 	 * individual range comparators. If include then at least 1
708 	 * range must be selected.
709 	 */
710 	if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
711 		return -EINVAL;
712 
713 	config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
714 	return size;
715 }
716 static DEVICE_ATTR_RW(bb_ctrl);
717 
718 static ssize_t event_vinst_show(struct device *dev,
719 				struct device_attribute *attr,
720 				char *buf)
721 {
722 	unsigned long val;
723 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
724 	struct etmv4_config *config = &drvdata->config;
725 
726 	val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
727 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
728 }
729 
730 static ssize_t event_vinst_store(struct device *dev,
731 				 struct device_attribute *attr,
732 				 const char *buf, size_t size)
733 {
734 	unsigned long val;
735 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
736 	struct etmv4_config *config = &drvdata->config;
737 
738 	if (kstrtoul(buf, 16, &val))
739 		return -EINVAL;
740 
741 	spin_lock(&drvdata->spinlock);
742 	val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
743 	config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
744 	config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
745 	spin_unlock(&drvdata->spinlock);
746 	return size;
747 }
748 static DEVICE_ATTR_RW(event_vinst);
749 
750 static ssize_t s_exlevel_vinst_show(struct device *dev,
751 				    struct device_attribute *attr,
752 				    char *buf)
753 {
754 	unsigned long val;
755 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
756 	struct etmv4_config *config = &drvdata->config;
757 
758 	val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
759 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
760 }
761 
762 static ssize_t s_exlevel_vinst_store(struct device *dev,
763 				     struct device_attribute *attr,
764 				     const char *buf, size_t size)
765 {
766 	unsigned long val;
767 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
768 	struct etmv4_config *config = &drvdata->config;
769 
770 	if (kstrtoul(buf, 16, &val))
771 		return -EINVAL;
772 
773 	spin_lock(&drvdata->spinlock);
774 	/* clear all EXLEVEL_S bits  */
775 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
776 	/* enable instruction tracing for corresponding exception level */
777 	val &= drvdata->s_ex_level;
778 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
779 	spin_unlock(&drvdata->spinlock);
780 	return size;
781 }
782 static DEVICE_ATTR_RW(s_exlevel_vinst);
783 
784 static ssize_t ns_exlevel_vinst_show(struct device *dev,
785 				     struct device_attribute *attr,
786 				     char *buf)
787 {
788 	unsigned long val;
789 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
790 	struct etmv4_config *config = &drvdata->config;
791 
792 	/* EXLEVEL_NS, bits[23:20] */
793 	val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
794 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
795 }
796 
797 static ssize_t ns_exlevel_vinst_store(struct device *dev,
798 				      struct device_attribute *attr,
799 				      const char *buf, size_t size)
800 {
801 	unsigned long val;
802 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
803 	struct etmv4_config *config = &drvdata->config;
804 
805 	if (kstrtoul(buf, 16, &val))
806 		return -EINVAL;
807 
808 	spin_lock(&drvdata->spinlock);
809 	/* clear EXLEVEL_NS bits  */
810 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
811 	/* enable instruction tracing for corresponding exception level */
812 	val &= drvdata->ns_ex_level;
813 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
814 	spin_unlock(&drvdata->spinlock);
815 	return size;
816 }
817 static DEVICE_ATTR_RW(ns_exlevel_vinst);
818 
819 static ssize_t addr_idx_show(struct device *dev,
820 			     struct device_attribute *attr,
821 			     char *buf)
822 {
823 	unsigned long val;
824 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
825 	struct etmv4_config *config = &drvdata->config;
826 
827 	val = config->addr_idx;
828 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
829 }
830 
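/*
 * 'addr_idx' sysfs handler: select which address comparator the
 * subsequent addr_* attribute accesses operate on.  Valid indices run
 * from 0 to (2 * nr_addr_cmp) - 1.
 */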
831 static ssize_t addr_idx_store(struct device *dev,
832 			      struct device_attribute *attr,
833 			      const char *buf, size_t size)
834 {
835 	unsigned long val;
836 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
837 	struct etmv4_config *config = &drvdata->config;
838 
839 	if (kstrtoul(buf, 16, &val))
840 		return -EINVAL;
841 	if (val >= drvdata->nr_addr_cmp * 2)
842 		return -EINVAL;
843 
844 	/*
845 	 * Use spinlock to ensure index doesn't change while it gets
846 	 * dereferenced multiple times within a spinlock block elsewhere.
847 	 */
848 	spin_lock(&drvdata->spinlock);
849 	config->addr_idx = val;
850 	spin_unlock(&drvdata->spinlock);
851 	return size;
852 }
853 static DEVICE_ATTR_RW(addr_idx);
854 
855 static ssize_t addr_instdatatype_show(struct device *dev,
856 				      struct device_attribute *attr,
857 				      char *buf)
858 {
859 	ssize_t len;
860 	u8 val, idx;
861 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
862 	struct etmv4_config *config = &drvdata->config;
863 
864 	spin_lock(&drvdata->spinlock);
865 	idx = config->addr_idx;
866 	val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
867 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
868 			val == TRCACATRn_TYPE_ADDR ? "instr" :
869 			(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
870 			(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
871 			"data_load_store")));
872 	spin_unlock(&drvdata->spinlock);
873 	return len;
874 }
875 
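/*
 * 'addr_instdatatype' sysfs handler: only "instr" is acted upon, clearing
 * TRCACATRn.TYPE so the selected comparator matches instruction
 * addresses; the data address types can be displayed but not programmed
 * here.
 */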
876 static ssize_t addr_instdatatype_store(struct device *dev,
877 				       struct device_attribute *attr,
878 				       const char *buf, size_t size)
879 {
880 	u8 idx;
881 	char str[20] = "";
882 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
883 	struct etmv4_config *config = &drvdata->config;
884 
885 	if (strlen(buf) >= 20)
886 		return -EINVAL;
887 	if (sscanf(buf, "%s", str) != 1)
888 		return -EINVAL;
889 
890 	spin_lock(&drvdata->spinlock);
891 	idx = config->addr_idx;
892 	if (!strcmp(str, "instr"))
893 		/* TYPE, bits[1:0] */
894 		config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
895 
896 	spin_unlock(&drvdata->spinlock);
897 	return size;
898 }
899 static DEVICE_ATTR_RW(addr_instdatatype);
900 
901 static ssize_t addr_single_show(struct device *dev,
902 				struct device_attribute *attr,
903 				char *buf)
904 {
905 	u8 idx;
906 	unsigned long val;
907 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
908 	struct etmv4_config *config = &drvdata->config;
909 
910 	idx = config->addr_idx;
911 	spin_lock(&drvdata->spinlock);
912 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
913 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
914 		spin_unlock(&drvdata->spinlock);
915 		return -EPERM;
916 	}
917 	val = (unsigned long)config->addr_val[idx];
918 	spin_unlock(&drvdata->spinlock);
919 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
920 }
921 
922 static ssize_t addr_single_store(struct device *dev,
923 				 struct device_attribute *attr,
924 				 const char *buf, size_t size)
925 {
926 	u8 idx;
927 	unsigned long val;
928 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
929 	struct etmv4_config *config = &drvdata->config;
930 
931 	if (kstrtoul(buf, 16, &val))
932 		return -EINVAL;
933 
934 	spin_lock(&drvdata->spinlock);
935 	idx = config->addr_idx;
936 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
937 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
938 		spin_unlock(&drvdata->spinlock);
939 		return -EPERM;
940 	}
941 
942 	config->addr_val[idx] = (u64)val;
943 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
944 	spin_unlock(&drvdata->spinlock);
945 	return size;
946 }
947 static DEVICE_ATTR_RW(addr_single);
948 
949 static ssize_t addr_range_show(struct device *dev,
950 			       struct device_attribute *attr,
951 			       char *buf)
952 {
953 	u8 idx;
954 	unsigned long val1, val2;
955 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
956 	struct etmv4_config *config = &drvdata->config;
957 
958 	spin_lock(&drvdata->spinlock);
959 	idx = config->addr_idx;
960 	if (idx % 2 != 0) {
961 		spin_unlock(&drvdata->spinlock);
962 		return -EPERM;
963 	}
964 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
965 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
966 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
967 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
968 		spin_unlock(&drvdata->spinlock);
969 		return -EPERM;
970 	}
971 
972 	val1 = (unsigned long)config->addr_val[idx];
973 	val2 = (unsigned long)config->addr_val[idx + 1];
974 	spin_unlock(&drvdata->spinlock);
975 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
976 }
977 
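/*
 * 'addr_range' sysfs handler: expects "<start> <end> [<exclude>]" in hex.
 * Programs the even/odd comparator pair selected by addr_idx as an
 * address range and updates the ViewInst include/exclude control,
 * defaulting to the EXCLUDE bit in 'mode' when the third parameter is
 * omitted.  A typical sequence from user space (the sysfs path depends on
 * the platform's CoreSight topology) is:
 *
 *   echo 0 > addr_idx
 *   echo <start> <end> > addr_range
 */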
978 static ssize_t addr_range_store(struct device *dev,
979 				struct device_attribute *attr,
980 				const char *buf, size_t size)
981 {
982 	u8 idx;
983 	unsigned long val1, val2;
984 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
985 	struct etmv4_config *config = &drvdata->config;
986 	int elements, exclude;
987 
988 	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
989 
990 	/* exclude is optional, but at least two parameters are required */
991 	if (elements < 2)
992 		return -EINVAL;
993 	/* lower address comparator cannot have a higher address value */
994 	if (val1 > val2)
995 		return -EINVAL;
996 
997 	spin_lock(&drvdata->spinlock);
998 	idx = config->addr_idx;
999 	if (idx % 2 != 0) {
1000 		spin_unlock(&drvdata->spinlock);
1001 		return -EPERM;
1002 	}
1003 
1004 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1005 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1006 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1007 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1008 		spin_unlock(&drvdata->spinlock);
1009 		return -EPERM;
1010 	}
1011 
1012 	config->addr_val[idx] = (u64)val1;
1013 	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1014 	config->addr_val[idx + 1] = (u64)val2;
1015 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1016 	/*
1017 	 * Program the include/exclude control bits for vinst or vdata
1018 	 * whenever a comparator pair is changed to ETM_ADDR_TYPE_RANGE:
1019 	 * use the supplied value, or default to the EXCLUDE bit in 'mode'.
1020 	 */
1021 	if (elements != 3)
1022 		exclude = config->mode & ETM_MODE_EXCLUDE;
1023 	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1024 
1025 	spin_unlock(&drvdata->spinlock);
1026 	return size;
1027 }
1028 static DEVICE_ATTR_RW(addr_range);
1029 
1030 static ssize_t addr_start_show(struct device *dev,
1031 			       struct device_attribute *attr,
1032 			       char *buf)
1033 {
1034 	u8 idx;
1035 	unsigned long val;
1036 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037 	struct etmv4_config *config = &drvdata->config;
1038 
1039 	spin_lock(&drvdata->spinlock);
1040 	idx = config->addr_idx;
1041 
1042 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1043 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1044 		spin_unlock(&drvdata->spinlock);
1045 		return -EPERM;
1046 	}
1047 
1048 	val = (unsigned long)config->addr_val[idx];
1049 	spin_unlock(&drvdata->spinlock);
1050 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1051 }
1052 
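/*
 * 'addr_start' sysfs handler: program the selected single address
 * comparator as a trace start point and flag it in the ViewInst
 * start/stop control mirror (vissctlr).  addr_stop below is the
 * symmetric stop-point variant.
 */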
1053 static ssize_t addr_start_store(struct device *dev,
1054 				struct device_attribute *attr,
1055 				const char *buf, size_t size)
1056 {
1057 	u8 idx;
1058 	unsigned long val;
1059 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1060 	struct etmv4_config *config = &drvdata->config;
1061 
1062 	if (kstrtoul(buf, 16, &val))
1063 		return -EINVAL;
1064 
1065 	spin_lock(&drvdata->spinlock);
1066 	idx = config->addr_idx;
1067 	if (!drvdata->nr_addr_cmp) {
1068 		spin_unlock(&drvdata->spinlock);
1069 		return -EINVAL;
1070 	}
1071 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1072 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1073 		spin_unlock(&drvdata->spinlock);
1074 		return -EPERM;
1075 	}
1076 
1077 	config->addr_val[idx] = (u64)val;
1078 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1079 	config->vissctlr |= BIT(idx);
1080 	spin_unlock(&drvdata->spinlock);
1081 	return size;
1082 }
1083 static DEVICE_ATTR_RW(addr_start);
1084 
1085 static ssize_t addr_stop_show(struct device *dev,
1086 			      struct device_attribute *attr,
1087 			      char *buf)
1088 {
1089 	u8 idx;
1090 	unsigned long val;
1091 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1092 	struct etmv4_config *config = &drvdata->config;
1093 
1094 	spin_lock(&drvdata->spinlock);
1095 	idx = config->addr_idx;
1096 
1097 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1098 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1099 		spin_unlock(&drvdata->spinlock);
1100 		return -EPERM;
1101 	}
1102 
1103 	val = (unsigned long)config->addr_val[idx];
1104 	spin_unlock(&drvdata->spinlock);
1105 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1106 }
1107 
1108 static ssize_t addr_stop_store(struct device *dev,
1109 			       struct device_attribute *attr,
1110 			       const char *buf, size_t size)
1111 {
1112 	u8 idx;
1113 	unsigned long val;
1114 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1115 	struct etmv4_config *config = &drvdata->config;
1116 
1117 	if (kstrtoul(buf, 16, &val))
1118 		return -EINVAL;
1119 
1120 	spin_lock(&drvdata->spinlock);
1121 	idx = config->addr_idx;
1122 	if (!drvdata->nr_addr_cmp) {
1123 		spin_unlock(&drvdata->spinlock);
1124 		return -EINVAL;
1125 	}
1126 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1127 	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1128 		spin_unlock(&drvdata->spinlock);
1129 		return -EPERM;
1130 	}
1131 
1132 	config->addr_val[idx] = (u64)val;
1133 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1134 	config->vissctlr |= BIT(idx + 16);
1135 	spin_unlock(&drvdata->spinlock);
1136 	return size;
1137 }
1138 static DEVICE_ATTR_RW(addr_stop);
1139 
1140 static ssize_t addr_ctxtype_show(struct device *dev,
1141 				 struct device_attribute *attr,
1142 				 char *buf)
1143 {
1144 	ssize_t len;
1145 	u8 idx, val;
1146 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1147 	struct etmv4_config *config = &drvdata->config;
1148 
1149 	spin_lock(&drvdata->spinlock);
1150 	idx = config->addr_idx;
1151 	/* CONTEXTTYPE, bits[3:2] */
1152 	val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1153 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1154 			(val == ETM_CTX_CTXID ? "ctxid" :
1155 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1156 	spin_unlock(&drvdata->spinlock);
1157 	return len;
1158 }
1159 
1160 static ssize_t addr_ctxtype_store(struct device *dev,
1161 				  struct device_attribute *attr,
1162 				  const char *buf, size_t size)
1163 {
1164 	u8 idx;
1165 	char str[10] = "";
1166 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1167 	struct etmv4_config *config = &drvdata->config;
1168 
1169 	if (strlen(buf) >= 10)
1170 		return -EINVAL;
1171 	if (sscanf(buf, "%s", str) != 1)
1172 		return -EINVAL;
1173 
1174 	spin_lock(&drvdata->spinlock);
1175 	idx = config->addr_idx;
1176 	if (!strcmp(str, "none"))
1177 		/* start by clearing context type bits */
1178 		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
1179 	else if (!strcmp(str, "ctxid")) {
1180 		/* 0b01 The trace unit performs a Context ID comparison */
1181 		if (drvdata->numcidc) {
1182 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1183 			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
1184 		}
1185 	} else if (!strcmp(str, "vmid")) {
1186 		/* 0b10 The trace unit performs a VMID comparison */
1187 		if (drvdata->numvmidc) {
1188 			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
1189 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1190 		}
1191 	} else if (!strcmp(str, "all")) {
1192 		/*
1193 		 * 0b11 The trace unit performs a Context ID
1194 		 * comparison and a VMID comparison
1195 		 */
1196 		if (drvdata->numcidc)
1197 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1198 		if (drvdata->numvmidc)
1199 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1200 	}
1201 	spin_unlock(&drvdata->spinlock);
1202 	return size;
1203 }
1204 static DEVICE_ATTR_RW(addr_ctxtype);
1205 
1206 static ssize_t addr_context_show(struct device *dev,
1207 				 struct device_attribute *attr,
1208 				 char *buf)
1209 {
1210 	u8 idx;
1211 	unsigned long val;
1212 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1213 	struct etmv4_config *config = &drvdata->config;
1214 
1215 	spin_lock(&drvdata->spinlock);
1216 	idx = config->addr_idx;
1217 	/* context ID comparator bits[6:4] */
1218 	val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1219 	spin_unlock(&drvdata->spinlock);
1220 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1221 }
1222 
1223 static ssize_t addr_context_store(struct device *dev,
1224 				  struct device_attribute *attr,
1225 				  const char *buf, size_t size)
1226 {
1227 	u8 idx;
1228 	unsigned long val;
1229 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1230 	struct etmv4_config *config = &drvdata->config;
1231 
1232 	if (kstrtoul(buf, 16, &val))
1233 		return -EINVAL;
1234 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1235 		return -EINVAL;
1236 	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1237 		     drvdata->numcidc : drvdata->numvmidc))
1238 		return -EINVAL;
1239 
1240 	spin_lock(&drvdata->spinlock);
1241 	idx = config->addr_idx;
1242 	/* clear context ID comparator bits[6:4] */
1243 	config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1244 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1245 	spin_unlock(&drvdata->spinlock);
1246 	return size;
1247 }
1248 static DEVICE_ATTR_RW(addr_context);
1249 
1250 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1251 				      struct device_attribute *attr,
1252 				      char *buf)
1253 {
1254 	u8 idx;
1255 	unsigned long val;
1256 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1257 	struct etmv4_config *config = &drvdata->config;
1258 
1259 	spin_lock(&drvdata->spinlock);
1260 	idx = config->addr_idx;
1261 	val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1262 	spin_unlock(&drvdata->spinlock);
1263 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1264 }
1265 
1266 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1267 				       struct device_attribute *attr,
1268 				       const char *buf, size_t size)
1269 {
1270 	u8 idx;
1271 	unsigned long val;
1272 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273 	struct etmv4_config *config = &drvdata->config;
1274 
1275 	if (kstrtoul(buf, 0, &val))
1276 		return -EINVAL;
1277 
1278 	if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1279 		return -EINVAL;
1280 
1281 	spin_lock(&drvdata->spinlock);
1282 	idx = config->addr_idx;
1283 	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1284 	config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1285 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1286 	spin_unlock(&drvdata->spinlock);
1287 	return size;
1288 }
1289 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1290 
1291 static const char * const addr_type_names[] = {
1292 	"unused",
1293 	"single",
1294 	"range",
1295 	"start",
1296 	"stop"
1297 };
1298 
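/*
 * 'addr_cmp_view' (read only): one line summary of the comparator
 * currently selected by addr_idx - its type, address value(s), the
 * include/exclude setting for ranges and the raw TRCACATRn control word.
 */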
1299 static ssize_t addr_cmp_view_show(struct device *dev,
1300 				  struct device_attribute *attr, char *buf)
1301 {
1302 	u8 idx, addr_type;
1303 	unsigned long addr_v, addr_v2, addr_ctrl;
1304 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1305 	struct etmv4_config *config = &drvdata->config;
1306 	int size = 0;
1307 	bool exclude = false;
1308 
1309 	spin_lock(&drvdata->spinlock);
1310 	idx = config->addr_idx;
1311 	addr_v = config->addr_val[idx];
1312 	addr_ctrl = config->addr_acc[idx];
1313 	addr_type = config->addr_type[idx];
1314 	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1315 		if (idx & 0x1) {
1316 			idx -= 1;
1317 			addr_v2 = addr_v;
1318 			addr_v = config->addr_val[idx];
1319 		} else {
1320 			addr_v2 = config->addr_val[idx + 1];
1321 		}
1322 		exclude = config->viiectlr & BIT(idx / 2 + 16);
1323 	}
1324 	spin_unlock(&drvdata->spinlock);
1325 	if (addr_type) {
1326 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1327 				 addr_type_names[addr_type], addr_v);
1328 		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1329 			size += scnprintf(buf + size, PAGE_SIZE - size,
1330 					  " %#lx %s", addr_v2,
1331 					  exclude ? "exclude" : "include");
1332 		}
1333 		size += scnprintf(buf + size, PAGE_SIZE - size,
1334 				  " ctrl(%#lx)\n", addr_ctrl);
1335 	} else {
1336 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1337 	}
1338 	return size;
1339 }
1340 static DEVICE_ATTR_RO(addr_cmp_view);
1341 
1342 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1343 					    struct device_attribute *attr,
1344 					    char *buf)
1345 {
1346 	unsigned long val;
1347 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1348 	struct etmv4_config *config = &drvdata->config;
1349 
1350 	if (!drvdata->nr_pe_cmp)
1351 		return -EINVAL;
1352 	val = config->vipcssctlr;
1353 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1354 }
1355 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1356 					     struct device_attribute *attr,
1357 					     const char *buf, size_t size)
1358 {
1359 	unsigned long val;
1360 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1361 	struct etmv4_config *config = &drvdata->config;
1362 
1363 	if (kstrtoul(buf, 16, &val))
1364 		return -EINVAL;
1365 	if (!drvdata->nr_pe_cmp)
1366 		return -EINVAL;
1367 
1368 	spin_lock(&drvdata->spinlock);
1369 	config->vipcssctlr = val;
1370 	spin_unlock(&drvdata->spinlock);
1371 	return size;
1372 }
1373 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1374 
1375 static ssize_t seq_idx_show(struct device *dev,
1376 			    struct device_attribute *attr,
1377 			    char *buf)
1378 {
1379 	unsigned long val;
1380 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381 	struct etmv4_config *config = &drvdata->config;
1382 
1383 	val = config->seq_idx;
1384 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1385 }
1386 
1387 static ssize_t seq_idx_store(struct device *dev,
1388 			     struct device_attribute *attr,
1389 			     const char *buf, size_t size)
1390 {
1391 	unsigned long val;
1392 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1393 	struct etmv4_config *config = &drvdata->config;
1394 
1395 	if (kstrtoul(buf, 16, &val))
1396 		return -EINVAL;
1397 	if (val >= drvdata->nrseqstate - 1)
1398 		return -EINVAL;
1399 
1400 	/*
1401 	 * Use spinlock to ensure index doesn't change while it gets
1402 	 * dereferenced multiple times within a spinlock block elsewhere.
1403 	 */
1404 	spin_lock(&drvdata->spinlock);
1405 	config->seq_idx = val;
1406 	spin_unlock(&drvdata->spinlock);
1407 	return size;
1408 }
1409 static DEVICE_ATTR_RW(seq_idx);
1410 
1411 static ssize_t seq_state_show(struct device *dev,
1412 			      struct device_attribute *attr,
1413 			      char *buf)
1414 {
1415 	unsigned long val;
1416 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417 	struct etmv4_config *config = &drvdata->config;
1418 
1419 	val = config->seq_state;
1420 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1421 }
1422 
1423 static ssize_t seq_state_store(struct device *dev,
1424 			       struct device_attribute *attr,
1425 			       const char *buf, size_t size)
1426 {
1427 	unsigned long val;
1428 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1429 	struct etmv4_config *config = &drvdata->config;
1430 
1431 	if (kstrtoul(buf, 16, &val))
1432 		return -EINVAL;
1433 	if (val >= drvdata->nrseqstate)
1434 		return -EINVAL;
1435 
1436 	config->seq_state = val;
1437 	return size;
1438 }
1439 static DEVICE_ATTR_RW(seq_state);
1440 
1441 static ssize_t seq_event_show(struct device *dev,
1442 			      struct device_attribute *attr,
1443 			      char *buf)
1444 {
1445 	u8 idx;
1446 	unsigned long val;
1447 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1448 	struct etmv4_config *config = &drvdata->config;
1449 
1450 	spin_lock(&drvdata->spinlock);
1451 	idx = config->seq_idx;
1452 	val = config->seq_ctrl[idx];
1453 	spin_unlock(&drvdata->spinlock);
1454 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1455 }
1456 
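/*
 * 'seq_event' sysfs handler: program the sequencer event register
 * selected by seq_idx.  Only the low halfword is kept: F[7:0] selects the
 * event that moves the sequencer forward, B[15:8] the event that moves it
 * backward.
 */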
1457 static ssize_t seq_event_store(struct device *dev,
1458 			       struct device_attribute *attr,
1459 			       const char *buf, size_t size)
1460 {
1461 	u8 idx;
1462 	unsigned long val;
1463 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1464 	struct etmv4_config *config = &drvdata->config;
1465 
1466 	if (kstrtoul(buf, 16, &val))
1467 		return -EINVAL;
1468 
1469 	spin_lock(&drvdata->spinlock);
1470 	idx = config->seq_idx;
1471 	/* Seq control has two masks B[15:8] F[7:0] */
1472 	config->seq_ctrl[idx] = val & 0xFFFF;
1473 	spin_unlock(&drvdata->spinlock);
1474 	return size;
1475 }
1476 static DEVICE_ATTR_RW(seq_event);
1477 
1478 static ssize_t seq_reset_event_show(struct device *dev,
1479 				    struct device_attribute *attr,
1480 				    char *buf)
1481 {
1482 	unsigned long val;
1483 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1484 	struct etmv4_config *config = &drvdata->config;
1485 
1486 	val = config->seq_rst;
1487 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1488 }
1489 
1490 static ssize_t seq_reset_event_store(struct device *dev,
1491 				     struct device_attribute *attr,
1492 				     const char *buf, size_t size)
1493 {
1494 	unsigned long val;
1495 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1496 	struct etmv4_config *config = &drvdata->config;
1497 
1498 	if (kstrtoul(buf, 16, &val))
1499 		return -EINVAL;
1500 	if (!(drvdata->nrseqstate))
1501 		return -EINVAL;
1502 
1503 	config->seq_rst = val & ETMv4_EVENT_MASK;
1504 	return size;
1505 }
1506 static DEVICE_ATTR_RW(seq_reset_event);
1507 
1508 static ssize_t cntr_idx_show(struct device *dev,
1509 			     struct device_attribute *attr,
1510 			     char *buf)
1511 {
1512 	unsigned long val;
1513 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1514 	struct etmv4_config *config = &drvdata->config;
1515 
1516 	val = config->cntr_idx;
1517 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1518 }
1519 
1520 static ssize_t cntr_idx_store(struct device *dev,
1521 			      struct device_attribute *attr,
1522 			      const char *buf, size_t size)
1523 {
1524 	unsigned long val;
1525 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1526 	struct etmv4_config *config = &drvdata->config;
1527 
1528 	if (kstrtoul(buf, 16, &val))
1529 		return -EINVAL;
1530 	if (val >= drvdata->nr_cntr)
1531 		return -EINVAL;
1532 
1533 	/*
1534 	 * Use spinlock to ensure index doesn't change while it gets
1535 	 * dereferenced multiple times within a spinlock block elsewhere.
1536 	 */
1537 	spin_lock(&drvdata->spinlock);
1538 	config->cntr_idx = val;
1539 	spin_unlock(&drvdata->spinlock);
1540 	return size;
1541 }
1542 static DEVICE_ATTR_RW(cntr_idx);
1543 
1544 static ssize_t cntrldvr_show(struct device *dev,
1545 			     struct device_attribute *attr,
1546 			     char *buf)
1547 {
1548 	u8 idx;
1549 	unsigned long val;
1550 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1551 	struct etmv4_config *config = &drvdata->config;
1552 
1553 	spin_lock(&drvdata->spinlock);
1554 	idx = config->cntr_idx;
1555 	val = config->cntrldvr[idx];
1556 	spin_unlock(&drvdata->spinlock);
1557 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1558 }
1559 
1560 static ssize_t cntrldvr_store(struct device *dev,
1561 			      struct device_attribute *attr,
1562 			      const char *buf, size_t size)
1563 {
1564 	u8 idx;
1565 	unsigned long val;
1566 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1567 	struct etmv4_config *config = &drvdata->config;
1568 
1569 	if (kstrtoul(buf, 16, &val))
1570 		return -EINVAL;
1571 	if (val > ETM_CNTR_MAX_VAL)
1572 		return -EINVAL;
1573 
1574 	spin_lock(&drvdata->spinlock);
1575 	idx = config->cntr_idx;
1576 	config->cntrldvr[idx] = val;
1577 	spin_unlock(&drvdata->spinlock);
1578 	return size;
1579 }
1580 static DEVICE_ATTR_RW(cntrldvr);
1581 
1582 static ssize_t cntr_val_show(struct device *dev,
1583 			     struct device_attribute *attr,
1584 			     char *buf)
1585 {
1586 	u8 idx;
1587 	unsigned long val;
1588 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1589 	struct etmv4_config *config = &drvdata->config;
1590 
1591 	spin_lock(&drvdata->spinlock);
1592 	idx = config->cntr_idx;
1593 	val = config->cntr_val[idx];
1594 	spin_unlock(&drvdata->spinlock);
1595 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1596 }
1597 
1598 static ssize_t cntr_val_store(struct device *dev,
1599 			      struct device_attribute *attr,
1600 			      const char *buf, size_t size)
1601 {
1602 	u8 idx;
1603 	unsigned long val;
1604 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1605 	struct etmv4_config *config = &drvdata->config;
1606 
1607 	if (kstrtoul(buf, 16, &val))
1608 		return -EINVAL;
1609 	if (val > ETM_CNTR_MAX_VAL)
1610 		return -EINVAL;
1611 
1612 	spin_lock(&drvdata->spinlock);
1613 	idx = config->cntr_idx;
1614 	config->cntr_val[idx] = val;
1615 	spin_unlock(&drvdata->spinlock);
1616 	return size;
1617 }
1618 static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
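
/*
 * Note on the counter attributes above: like the other configuration
 * attributes in this file, the written values are only cached in
 * drvdata->config here; they are programmed into the trace unit's
 * counter reload, value and control registers when the ETM is enabled.
 * What the selected counter counts and when it reloads is defined by
 * the ETMv4 architecture, not by this interface.
 */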

static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/*
	 * Resource selector pair 0 is always implemented and reserved,
	 * so indices 0 and 1 must not be used.
	 */
	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Take the spinlock so the index cannot change while it is being
	 * dereferenced multiple times inside a locked region elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);
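
/*
 * Example (numbers are illustrative): with nr_resource == 16 selector
 * pairs the accepted range for res_idx is 2..31, since selectors 0 and
 * 1 form the reserved pair 0 and are rejected above.
 */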

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For an odd idx the pair inversion (PAIRINV) bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~TRCRSCTLRn_PAIRINV;
	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
				       TRCRSCTLRn_INV |
				       TRCRSCTLRn_GROUP_MASK |
				       TRCRSCTLRn_SELECT_MASK);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
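
/*
 * The store above keeps only the fields of TRCRSCTLRn that user space
 * may program: the resource SELECT and GROUP fields plus the INV and
 * PAIRINV bits.  Any other bits in the written value are discarded,
 * and PAIRINV is additionally cleared for odd-numbered selectors,
 * where it is RES0.
 */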

static ssize_t sshot_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ss_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_ss_cmp)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->ss_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_idx);

static ssize_t sshot_ctrl_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_ctrl[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_ctrl_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ss_idx;
	config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
	/* must clear bit 31 in related status register on programming */
	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_ctrl);
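
/*
 * Illustrative usage (device name is an example): program the control
 * register of the single-shot comparator selected via sshot_idx.  Only
 * the comparator/range selection bits of the written value are kept,
 * and the sticky status bit is cleared, as required when the
 * comparator is reprogrammed:
 *
 *   echo 0x1 > /sys/bus/coresight/devices/etm0/sshot_ctrl
 */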

static ssize_t sshot_status_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_status[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(sshot_status);

static ssize_t sshot_pe_ctrl_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_pe_cmp[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_pe_ctrl_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ss_idx;
	config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
	/* must clear bit 31 in related status register on programming */
	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_pe_ctrl);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Take the spinlock so the index cannot change while it is being
	 * dereferenced multiple times inside a locked region elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace.  See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	val = (unsigned long)config->ctxid_pid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream.  But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * Only implemented when ctxid tracing is supported, i.e. at least
	 * one ctxid comparator is implemented and the ctxid size is
	 * non-zero.
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &pid))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
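
/*
 * Illustrative usage (device name and value are examples): program the
 * context ID comparator selected via ctxid_idx with a kernel PID, in
 * hex; the comparator can then be used to qualify tracing.  The write
 * is refused outside the init PID namespace for the reasons given in
 * ctxid_pid_store() above.
 *
 *   echo 0x1f4 > /sys/bus/coresight/devices/etm0/ctxid_pid
 */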

static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace.  See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use contextID tracing if coming from a PID namespace.  See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * Only implemented when ctxid tracing is supported, i.e. at least
	 * one ctxid comparator is implemented and the ctxid size is
	 * non-zero.
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the ctxid comparator value to 0x0, otherwise the behavior is
	 * unpredictable.  For example, if bit[3] of ctxid_mask0 (the mask byte
	 * for comparator 0) is 1, bits[31:24] of the ctxid comparator 0 value
	 * register must be cleared.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
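
/*
 * Worked example of the mask handling above (value is illustrative):
 * writing "0x1" to ctxid_masks sets bit[0] of the mask byte for
 * comparator 0, which masks byte 0 (bits[7:0]) of that comparator's
 * value, so the loop above clears the same byte of ctxid_pid[0].
 * Byte 1 of ctxid_mask0 controls comparator 1 and so on, with
 * ctxid_mask1 covering comparators 4-7.
 */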

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Take the spinlock so the index cannot change while it is being
	 * dereferenced multiple times inside a locked region elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val = (unsigned long)config->vmid_val[config->vmid_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * Only implemented when vmid tracing is supported, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least 8 bits.
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);

static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * Only implemented when vmid tracing is supported, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least 8 bits.
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise the behavior is
	 * unpredictable.  For example, if bit[3] of vmid_mask0 (the mask byte
	 * for comparator 0) is 1, bits[31:24] of the vmid comparator 0 value
	 * register must be cleared.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
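
/*
 * The vmid mask handling mirrors ctxid_masks above: each byte of
 * vmid_mask0/vmid_mask1 selects a comparator and each bit within that
 * byte masks one byte of the corresponding vmid comparator value.
 */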

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);

}
static DEVICE_ATTR_RO(cpu);

static ssize_t ts_source_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->trfcr) {
		val = -1;
		goto out;
	}

	switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
	case TRFCR_ELx_TS_VIRTUAL:
	case TRFCR_ELx_TS_GUEST_PHYSICAL:
	case TRFCR_ELx_TS_PHYSICAL:
		val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
		break;
	default:
		val = -1;
		break;
	}

out:
	return sysfs_emit(buf, "%d\n", val);
}
static DEVICE_ATTR_RO(ts_source);
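
/*
 * ts_source reports the raw TRFCR_ELx.TS field programmed for this
 * CPU, i.e. whether the trace unit timestamps with the virtual, guest
 * physical or physical counter, or -1 when TRFCR is not available or
 * the field holds an unexpected value.
 */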

static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	&dev_attr_ts_source.attr,
	NULL,
};

struct etmv4_reg {
	struct coresight_device *csdev;
	u32 offset;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}

static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
	struct etmv4_reg reg;

	reg.offset = offset;
	reg.csdev = drvdata->csdev;

	/*
	 * The smp cross call ensures the CPU is powered up before the
	 * ETMv4 trace core registers are accessed.
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}

static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return (u32)(unsigned long)eattr->var;
}

static ssize_t coresight_etm4x_reg_show(struct device *dev,
					struct device_attribute *d_attr,
					char *buf)
{
	u32 val, offset;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	offset = coresight_etm4x_attr_to_offset(d_attr);

	pm_runtime_get_sync(dev->parent);
	val = etmv4_cross_read(drvdata, offset);
	pm_runtime_put_sync(dev->parent);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}

static inline bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
	switch (offset) {
	ETM_COMMON_SYSREG_LIST_CASES
		/*
		 * Registers common to ETE & ETM4x and accessible via system
		 * instructions are always implemented.
		 */
		return true;

	ETM4x_ONLY_SYSREG_LIST_CASES
		/*
		 * We only support etm4x and ete. So if the device is not
		 * ETE, it must be ETMv4x.
		 */
		return !etm4x_is_ete(drvdata);

	ETM4x_MMAP_LIST_CASES
		/*
		 * Registers available only through the memory-mapped
		 * interface must not be accessed via system instructions.
		 * We cannot use drvdata->csdev here, as this function is
		 * called during device creation, via coresight_register(),
		 * before the csdev is initialized.  So rely on
		 * drvdata->base to detect whether we have memory-mapped
		 * access.  ETE does not implement memory-mapped access, so
		 * checking that we are using MMIO is sufficient.
		 */
		return !!drvdata->base;

	ETE_ONLY_SYSREG_LIST_CASES
		return etm4x_is_ete(drvdata);
	}

	return false;
}

/*
 * Hide the ETM4x registers that may not be available on the
 * hardware.
 * Certain management registers are not accessible via system
 * instructions, so make the corresponding sysfs attributes
 * hidden on such systems.
 */
static umode_t
coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
				     struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct device_attribute *d_attr;
	u32 offset;

	d_attr = container_of(attr, struct device_attribute, attr);
	offset = coresight_etm4x_attr_to_offset(d_attr);

	if (etm4x_register_implemented(drvdata, offset))
		return attr->mode;
	return 0;
}

#define coresight_etm4x_reg(name, offset)				\
	&((struct dev_ext_attribute[]) {				\
	   {								\
		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
		(void *)(unsigned long)offset				\
	   }								\
	})[0].attr.attr
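
/*
 * Each use of coresight_etm4x_reg() below expands to a read-only
 * dev_ext_attribute whose ->var member carries the register offset;
 * coresight_etm4x_reg_show() recovers the offset through
 * coresight_etm4x_attr_to_offset() and reads the register on the CPU
 * that owns the trace unit.
 */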

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	coresight_etm4x_reg(trcpdcr, TRCPDCR),
	coresight_etm4x_reg(trcpdsr, TRCPDSR),
	coresight_etm4x_reg(trclsr, TRCLSR),
	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
	coresight_etm4x_reg(trcdevid, TRCDEVID),
	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
	coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
	NULL,
};

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	coresight_etm4x_reg(trcidr0, TRCIDR0),
	coresight_etm4x_reg(trcidr1, TRCIDR1),
	coresight_etm4x_reg(trcidr2, TRCIDR2),
	coresight_etm4x_reg(trcidr3, TRCIDR3),
	coresight_etm4x_reg(trcidr4, TRCIDR4),
	coresight_etm4x_reg(trcidr5, TRCIDR5),
	/* trcidr[6,7] are reserved */
	coresight_etm4x_reg(trcidr8, TRCIDR8),
	coresight_etm4x_reg(trcidr9, TRCIDR9),
	coresight_etm4x_reg(trcidr10, TRCIDR10),
	coresight_etm4x_reg(trcidr11, TRCIDR11),
	coresight_etm4x_reg(trcidr12, TRCIDR12),
	coresight_etm4x_reg(trcidr13, TRCIDR13),
	NULL,
};

static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.is_visible = coresight_etm4x_attr_reg_implemented,
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
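
/*
 * These groups back the sysfs interface of the ETMv4/ETE device: the
 * unnamed group holds the configuration attributes at the top level,
 * while "mgmt" and "trcidr" appear as sub-directories exposing the
 * management and ID registers (e.g. .../etm0/mgmt/trcdevarch - path
 * illustrative, the device name is platform dependent).
 */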