1 /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 #include <linux/kernel.h>
14 #include <linux/moduleparam.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
20 #include <linux/err.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
24 #include <linux/smp.h>
25 #include <linux/sysfs.h>
26 #include <linux/stat.h>
27 #include <linux/clk.h>
28 #include <linux/cpu.h>
29 #include <linux/coresight.h>
30 #include <linux/pm_wakeup.h>
31 #include <linux/amba/bus.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/pm_runtime.h>
35 #include <asm/sections.h>
37 #include "coresight-etm4x.h"
39 static int boot_enable;
40 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
42 /* The number of ETMv4 currently registered */
43 static int etm4_count;
44 static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
46 static void etm4_os_unlock(void *info)
48 struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
50 /* Writing any value to ETMOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata->base + TRCOSLAR);
55 static bool etm4_arch_supported(u8 arch)
66 static int etm4_cpu_id(struct coresight_device *csdev)
68 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
73 static int etm4_trace_id(struct coresight_device *csdev)
75 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
80 return drvdata->trcid;
82 pm_runtime_get_sync(drvdata->dev);
83 spin_lock_irqsave(&drvdata->spinlock, flags);
85 CS_UNLOCK(drvdata->base);
86 trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
87 trace_id &= ETM_TRACEID_MASK;
88 CS_LOCK(drvdata->base);
90 spin_unlock_irqrestore(&drvdata->spinlock, flags);
91 pm_runtime_put(drvdata->dev);
96 static void etm4_enable_hw(void *info)
99 struct etmv4_drvdata *drvdata = info;
101 CS_UNLOCK(drvdata->base);
103 etm4_os_unlock(drvdata);
105 /* Disable the trace unit before programming trace registers */
106 writel_relaxed(0, drvdata->base + TRCPRGCTLR);
108 /* wait for TRCSTATR.IDLE to go up */
109 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
110 dev_err(drvdata->dev,
111 "timeout observed when probing at offset %#x\n",
114 writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
115 writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
116 /* nothing specific implemented */
117 writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
118 writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
119 writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
120 writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
121 writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
122 writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
123 writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
124 writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
125 writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
126 writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
127 writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
128 writel_relaxed(drvdata->vissctlr,
129 drvdata->base + TRCVISSCTLR);
130 writel_relaxed(drvdata->vipcssctlr,
131 drvdata->base + TRCVIPCSSCTLR);
132 for (i = 0; i < drvdata->nrseqstate - 1; i++)
133 writel_relaxed(drvdata->seq_ctrl[i],
134 drvdata->base + TRCSEQEVRn(i));
135 writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
136 writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
137 writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
138 for (i = 0; i < drvdata->nr_cntr; i++) {
139 writel_relaxed(drvdata->cntrldvr[i],
140 drvdata->base + TRCCNTRLDVRn(i));
141 writel_relaxed(drvdata->cntr_ctrl[i],
142 drvdata->base + TRCCNTCTLRn(i));
143 writel_relaxed(drvdata->cntr_val[i],
144 drvdata->base + TRCCNTVRn(i));
147 /* Resource selector pair 0 is always implemented and reserved */
148 for (i = 2; i < drvdata->nr_resource * 2; i++)
149 writel_relaxed(drvdata->res_ctrl[i],
150 drvdata->base + TRCRSCTLRn(i));
152 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
153 writel_relaxed(drvdata->ss_ctrl[i],
154 drvdata->base + TRCSSCCRn(i));
155 writel_relaxed(drvdata->ss_status[i],
156 drvdata->base + TRCSSCSRn(i));
157 writel_relaxed(drvdata->ss_pe_cmp[i],
158 drvdata->base + TRCSSPCICRn(i));
160 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
161 writeq_relaxed(drvdata->addr_val[i],
162 drvdata->base + TRCACVRn(i));
163 writeq_relaxed(drvdata->addr_acc[i],
164 drvdata->base + TRCACATRn(i));
166 for (i = 0; i < drvdata->numcidc; i++)
167 writeq_relaxed(drvdata->ctxid_pid[i],
168 drvdata->base + TRCCIDCVRn(i));
169 writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
170 writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
172 for (i = 0; i < drvdata->numvmidc; i++)
173 writeq_relaxed(drvdata->vmid_val[i],
174 drvdata->base + TRCVMIDCVRn(i));
175 writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
176 writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
178 /* Enable the trace unit */
179 writel_relaxed(1, drvdata->base + TRCPRGCTLR);
181 /* wait for TRCSTATR.IDLE to go back down to '0' */
182 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
183 dev_err(drvdata->dev,
184 "timeout observed when probing at offset %#x\n",
187 CS_LOCK(drvdata->base);
189 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
192 static int etm4_enable(struct coresight_device *csdev)
194 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
197 pm_runtime_get_sync(drvdata->dev);
198 spin_lock(&drvdata->spinlock);
201 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
202 * ensures that register writes occur when cpu is powered.
204 ret = smp_call_function_single(drvdata->cpu,
205 etm4_enable_hw, drvdata, 1);
208 drvdata->enable = true;
209 drvdata->sticky_enable = true;
211 spin_unlock(&drvdata->spinlock);
213 dev_info(drvdata->dev, "ETM tracing enabled\n");
216 spin_unlock(&drvdata->spinlock);
217 pm_runtime_put(drvdata->dev);
221 static void etm4_disable_hw(void *info)
224 struct etmv4_drvdata *drvdata = info;
226 CS_UNLOCK(drvdata->base);
228 control = readl_relaxed(drvdata->base + TRCPRGCTLR);
230 /* EN, bit[0] Trace unit enable bit */
233 /* make sure everything completes before disabling */
236 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
238 CS_LOCK(drvdata->base);
240 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
243 static void etm4_disable(struct coresight_device *csdev)
245 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
248 * Taking hotplug lock here protects from clocks getting disabled
249 * with tracing being left on (crash scenario) if user disable occurs
250 * after cpu online mask indicates the cpu is offline but before the
251 * DYING hotplug callback is serviced by the ETM driver.
254 spin_lock(&drvdata->spinlock);
257 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
258 * ensures that register writes occur when cpu is powered.
260 smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
261 drvdata->enable = false;
263 spin_unlock(&drvdata->spinlock);
266 pm_runtime_put(drvdata->dev);
268 dev_info(drvdata->dev, "ETM tracing disabled\n");
271 static const struct coresight_ops_source etm4_source_ops = {
272 .cpu_id = etm4_cpu_id,
273 .trace_id = etm4_trace_id,
274 .enable = etm4_enable,
275 .disable = etm4_disable,
278 static const struct coresight_ops etm4_cs_ops = {
279 .source_ops = &etm4_source_ops,
282 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
284 u8 idx = drvdata->addr_idx;
287 * TRCACATRn.TYPE bit[1:0]: type of comparison
288 * the trace unit performs
290 if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
295 * We are performing instruction address comparison. Set the
296 * relevant bit of ViewInst Include/Exclude Control register
297 * for corresponding address comparator pair.
299 if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
300 drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
303 if (exclude == true) {
305 * Set exclude bit and unset the include bit
306 * corresponding to comparator pair
308 drvdata->viiectlr |= BIT(idx / 2 + 16);
309 drvdata->viiectlr &= ~BIT(idx / 2);
312 * Set include bit and unset exclude bit
313 * corresponding to comparator pair
315 drvdata->viiectlr |= BIT(idx / 2);
316 drvdata->viiectlr &= ~BIT(idx / 2 + 16);
322 static ssize_t nr_pe_cmp_show(struct device *dev,
323 struct device_attribute *attr,
327 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
329 val = drvdata->nr_pe_cmp;
330 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
332 static DEVICE_ATTR_RO(nr_pe_cmp);
334 static ssize_t nr_addr_cmp_show(struct device *dev,
335 struct device_attribute *attr,
339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
341 val = drvdata->nr_addr_cmp;
342 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
344 static DEVICE_ATTR_RO(nr_addr_cmp);
346 static ssize_t nr_cntr_show(struct device *dev,
347 struct device_attribute *attr,
351 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
353 val = drvdata->nr_cntr;
354 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
356 static DEVICE_ATTR_RO(nr_cntr);
358 static ssize_t nr_ext_inp_show(struct device *dev,
359 struct device_attribute *attr,
363 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
365 val = drvdata->nr_ext_inp;
366 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
368 static DEVICE_ATTR_RO(nr_ext_inp);
370 static ssize_t numcidc_show(struct device *dev,
371 struct device_attribute *attr,
375 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
377 val = drvdata->numcidc;
378 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
380 static DEVICE_ATTR_RO(numcidc);
382 static ssize_t numvmidc_show(struct device *dev,
383 struct device_attribute *attr,
387 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
389 val = drvdata->numvmidc;
390 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
392 static DEVICE_ATTR_RO(numvmidc);
394 static ssize_t nrseqstate_show(struct device *dev,
395 struct device_attribute *attr,
399 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
401 val = drvdata->nrseqstate;
402 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
404 static DEVICE_ATTR_RO(nrseqstate);
406 static ssize_t nr_resource_show(struct device *dev,
407 struct device_attribute *attr,
411 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
413 val = drvdata->nr_resource;
414 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
416 static DEVICE_ATTR_RO(nr_resource);
418 static ssize_t nr_ss_cmp_show(struct device *dev,
419 struct device_attribute *attr,
423 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
425 val = drvdata->nr_ss_cmp;
426 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
428 static DEVICE_ATTR_RO(nr_ss_cmp);
430 static ssize_t reset_store(struct device *dev,
431 struct device_attribute *attr,
432 const char *buf, size_t size)
436 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
438 if (kstrtoul(buf, 16, &val))
441 spin_lock(&drvdata->spinlock);
445 /* Disable data tracing: do not trace load and store data transfers */
446 drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
447 drvdata->cfg &= ~(BIT(1) | BIT(2));
449 /* Disable data value and data address tracing */
450 drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
451 ETM_MODE_DATA_TRACE_VAL);
452 drvdata->cfg &= ~(BIT(16) | BIT(17));
454 /* Disable all events tracing */
455 drvdata->eventctrl0 = 0x0;
456 drvdata->eventctrl1 = 0x0;
458 /* Disable timestamp event */
459 drvdata->ts_ctrl = 0x0;
461 /* Disable stalling */
462 drvdata->stall_ctrl = 0x0;
464 /* Reset trace synchronization period to 2^8 = 256 bytes*/
465 if (drvdata->syncpr == false)
466 drvdata->syncfreq = 0x8;
469 * Enable ViewInst to trace everything with start-stop logic in
470 * started state. ARM recommends start-stop logic is set before
473 drvdata->vinst_ctrl |= BIT(0);
474 if (drvdata->nr_addr_cmp == true) {
475 drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
476 /* SSSTATUS, bit[9] */
477 drvdata->vinst_ctrl |= BIT(9);
480 /* No address range filtering for ViewInst */
481 drvdata->viiectlr = 0x0;
483 /* No start-stop filtering for ViewInst */
484 drvdata->vissctlr = 0x0;
486 /* Disable seq events */
487 for (i = 0; i < drvdata->nrseqstate-1; i++)
488 drvdata->seq_ctrl[i] = 0x0;
489 drvdata->seq_rst = 0x0;
490 drvdata->seq_state = 0x0;
492 /* Disable external input events */
493 drvdata->ext_inp = 0x0;
495 drvdata->cntr_idx = 0x0;
496 for (i = 0; i < drvdata->nr_cntr; i++) {
497 drvdata->cntrldvr[i] = 0x0;
498 drvdata->cntr_ctrl[i] = 0x0;
499 drvdata->cntr_val[i] = 0x0;
502 /* Resource selector pair 0 is always implemented and reserved */
503 drvdata->res_idx = 0x2;
504 for (i = 2; i < drvdata->nr_resource * 2; i++)
505 drvdata->res_ctrl[i] = 0x0;
507 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
508 drvdata->ss_ctrl[i] = 0x0;
509 drvdata->ss_pe_cmp[i] = 0x0;
512 drvdata->addr_idx = 0x0;
513 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
514 drvdata->addr_val[i] = 0x0;
515 drvdata->addr_acc[i] = 0x0;
516 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
519 drvdata->ctxid_idx = 0x0;
520 for (i = 0; i < drvdata->numcidc; i++) {
521 drvdata->ctxid_pid[i] = 0x0;
522 drvdata->ctxid_vpid[i] = 0x0;
525 drvdata->ctxid_mask0 = 0x0;
526 drvdata->ctxid_mask1 = 0x0;
528 drvdata->vmid_idx = 0x0;
529 for (i = 0; i < drvdata->numvmidc; i++)
530 drvdata->vmid_val[i] = 0x0;
531 drvdata->vmid_mask0 = 0x0;
532 drvdata->vmid_mask1 = 0x0;
534 drvdata->trcid = drvdata->cpu + 1;
535 spin_unlock(&drvdata->spinlock);
538 static DEVICE_ATTR_WO(reset);
540 static ssize_t mode_show(struct device *dev,
541 struct device_attribute *attr,
545 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
548 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
551 static ssize_t mode_store(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf, size_t size)
555 unsigned long val, mode;
556 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
558 if (kstrtoul(buf, 16, &val))
561 spin_lock(&drvdata->spinlock);
562 drvdata->mode = val & ETMv4_MODE_ALL;
564 if (drvdata->mode & ETM_MODE_EXCLUDE)
565 etm4_set_mode_exclude(drvdata, true);
567 etm4_set_mode_exclude(drvdata, false);
569 if (drvdata->instrp0 == true) {
570 /* start by clearing instruction P0 field */
571 drvdata->cfg &= ~(BIT(1) | BIT(2));
572 if (drvdata->mode & ETM_MODE_LOAD)
573 /* 0b01 Trace load instructions as P0 instructions */
574 drvdata->cfg |= BIT(1);
575 if (drvdata->mode & ETM_MODE_STORE)
576 /* 0b10 Trace store instructions as P0 instructions */
577 drvdata->cfg |= BIT(2);
578 if (drvdata->mode & ETM_MODE_LOAD_STORE)
580 * 0b11 Trace load and store instructions
583 drvdata->cfg |= BIT(1) | BIT(2);
586 /* bit[3], Branch broadcast mode */
587 if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
588 drvdata->cfg |= BIT(3);
590 drvdata->cfg &= ~BIT(3);
592 /* bit[4], Cycle counting instruction trace bit */
593 if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
594 (drvdata->trccci == true))
595 drvdata->cfg |= BIT(4);
597 drvdata->cfg &= ~BIT(4);
599 /* bit[6], Context ID tracing bit */
600 if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
601 drvdata->cfg |= BIT(6);
603 drvdata->cfg &= ~BIT(6);
605 if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
606 drvdata->cfg |= BIT(7);
608 drvdata->cfg &= ~BIT(7);
610 /* bits[10:8], Conditional instruction tracing bit */
611 mode = ETM_MODE_COND(drvdata->mode);
612 if (drvdata->trccond == true) {
613 drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
614 drvdata->cfg |= mode << 8;
617 /* bit[11], Global timestamp tracing bit */
618 if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
619 drvdata->cfg |= BIT(11);
621 drvdata->cfg &= ~BIT(11);
623 /* bit[12], Return stack enable bit */
624 if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
625 (drvdata->retstack == true))
626 drvdata->cfg |= BIT(12);
628 drvdata->cfg &= ~BIT(12);
630 /* bits[14:13], Q element enable field */
631 mode = ETM_MODE_QELEM(drvdata->mode);
632 /* start by clearing QE bits */
633 drvdata->cfg &= ~(BIT(13) | BIT(14));
634 /* if supported, Q elements with instruction counts are enabled */
635 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
636 drvdata->cfg |= BIT(13);
638 * if supported, Q elements with and without instruction
641 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
642 drvdata->cfg |= BIT(14);
644 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
645 if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
646 (drvdata->atbtrig == true))
647 drvdata->eventctrl1 |= BIT(11);
649 drvdata->eventctrl1 &= ~BIT(11);
651 /* bit[12], Low-power state behavior override bit */
652 if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
653 (drvdata->lpoverride == true))
654 drvdata->eventctrl1 |= BIT(12);
656 drvdata->eventctrl1 &= ~BIT(12);
658 /* bit[8], Instruction stall bit */
659 if (drvdata->mode & ETM_MODE_ISTALL_EN)
660 drvdata->stall_ctrl |= BIT(8);
662 drvdata->stall_ctrl &= ~BIT(8);
664 /* bit[10], Prioritize instruction trace bit */
665 if (drvdata->mode & ETM_MODE_INSTPRIO)
666 drvdata->stall_ctrl |= BIT(10);
668 drvdata->stall_ctrl &= ~BIT(10);
670 /* bit[13], Trace overflow prevention bit */
671 if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
672 (drvdata->nooverflow == true))
673 drvdata->stall_ctrl |= BIT(13);
675 drvdata->stall_ctrl &= ~BIT(13);
677 /* bit[9] Start/stop logic control bit */
678 if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
679 drvdata->vinst_ctrl |= BIT(9);
681 drvdata->vinst_ctrl &= ~BIT(9);
683 /* bit[10], Whether a trace unit must trace a Reset exception */
684 if (drvdata->mode & ETM_MODE_TRACE_RESET)
685 drvdata->vinst_ctrl |= BIT(10);
687 drvdata->vinst_ctrl &= ~BIT(10);
689 /* bit[11], Whether a trace unit must trace a system error exception */
690 if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
691 (drvdata->trc_error == true))
692 drvdata->vinst_ctrl |= BIT(11);
694 drvdata->vinst_ctrl &= ~BIT(11);
696 spin_unlock(&drvdata->spinlock);
699 static DEVICE_ATTR_RW(mode);
701 static ssize_t pe_show(struct device *dev,
702 struct device_attribute *attr,
706 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
708 val = drvdata->pe_sel;
709 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
712 static ssize_t pe_store(struct device *dev,
713 struct device_attribute *attr,
714 const char *buf, size_t size)
717 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
719 if (kstrtoul(buf, 16, &val))
722 spin_lock(&drvdata->spinlock);
723 if (val > drvdata->nr_pe) {
724 spin_unlock(&drvdata->spinlock);
728 drvdata->pe_sel = val;
729 spin_unlock(&drvdata->spinlock);
732 static DEVICE_ATTR_RW(pe);
734 static ssize_t event_show(struct device *dev,
735 struct device_attribute *attr,
739 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
741 val = drvdata->eventctrl0;
742 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
745 static ssize_t event_store(struct device *dev,
746 struct device_attribute *attr,
747 const char *buf, size_t size)
750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
752 if (kstrtoul(buf, 16, &val))
755 spin_lock(&drvdata->spinlock);
756 switch (drvdata->nr_event) {
758 /* EVENT0, bits[7:0] */
759 drvdata->eventctrl0 = val & 0xFF;
762 /* EVENT1, bits[15:8] */
763 drvdata->eventctrl0 = val & 0xFFFF;
766 /* EVENT2, bits[23:16] */
767 drvdata->eventctrl0 = val & 0xFFFFFF;
770 /* EVENT3, bits[31:24] */
771 drvdata->eventctrl0 = val;
776 spin_unlock(&drvdata->spinlock);
779 static DEVICE_ATTR_RW(event);
781 static ssize_t event_instren_show(struct device *dev,
782 struct device_attribute *attr,
786 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
788 val = BMVAL(drvdata->eventctrl1, 0, 3);
789 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
792 static ssize_t event_instren_store(struct device *dev,
793 struct device_attribute *attr,
794 const char *buf, size_t size)
797 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
799 if (kstrtoul(buf, 16, &val))
802 spin_lock(&drvdata->spinlock);
803 /* start by clearing all instruction event enable bits */
804 drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
805 switch (drvdata->nr_event) {
807 /* generate Event element for event 1 */
808 drvdata->eventctrl1 |= val & BIT(1);
811 /* generate Event element for event 1 and 2 */
812 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
815 /* generate Event element for event 1, 2 and 3 */
816 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
819 /* generate Event element for all 4 events */
820 drvdata->eventctrl1 |= val & 0xF;
825 spin_unlock(&drvdata->spinlock);
828 static DEVICE_ATTR_RW(event_instren);
830 static ssize_t event_ts_show(struct device *dev,
831 struct device_attribute *attr,
835 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
837 val = drvdata->ts_ctrl;
838 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
841 static ssize_t event_ts_store(struct device *dev,
842 struct device_attribute *attr,
843 const char *buf, size_t size)
846 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
848 if (kstrtoul(buf, 16, &val))
850 if (!drvdata->ts_size)
853 drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
856 static DEVICE_ATTR_RW(event_ts);
858 static ssize_t syncfreq_show(struct device *dev,
859 struct device_attribute *attr,
863 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
865 val = drvdata->syncfreq;
866 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
869 static ssize_t syncfreq_store(struct device *dev,
870 struct device_attribute *attr,
871 const char *buf, size_t size)
874 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
876 if (kstrtoul(buf, 16, &val))
878 if (drvdata->syncpr == true)
881 drvdata->syncfreq = val & ETMv4_SYNC_MASK;
884 static DEVICE_ATTR_RW(syncfreq);
886 static ssize_t cyc_threshold_show(struct device *dev,
887 struct device_attribute *attr,
891 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
893 val = drvdata->ccctlr;
894 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
897 static ssize_t cyc_threshold_store(struct device *dev,
898 struct device_attribute *attr,
899 const char *buf, size_t size)
902 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
904 if (kstrtoul(buf, 16, &val))
906 if (val < drvdata->ccitmin)
909 drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
912 static DEVICE_ATTR_RW(cyc_threshold);
914 static ssize_t bb_ctrl_show(struct device *dev,
915 struct device_attribute *attr,
919 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
921 val = drvdata->bb_ctrl;
922 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
925 static ssize_t bb_ctrl_store(struct device *dev,
926 struct device_attribute *attr,
927 const char *buf, size_t size)
930 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
932 if (kstrtoul(buf, 16, &val))
934 if (drvdata->trcbb == false)
936 if (!drvdata->nr_addr_cmp)
939 * Bit[7:0] selects which address range comparator is used for
940 * branch broadcast control.
942 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
945 drvdata->bb_ctrl = val;
948 static DEVICE_ATTR_RW(bb_ctrl);
950 static ssize_t event_vinst_show(struct device *dev,
951 struct device_attribute *attr,
955 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
957 val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
958 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
961 static ssize_t event_vinst_store(struct device *dev,
962 struct device_attribute *attr,
963 const char *buf, size_t size)
966 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
968 if (kstrtoul(buf, 16, &val))
971 spin_lock(&drvdata->spinlock);
972 val &= ETMv4_EVENT_MASK;
973 drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
974 drvdata->vinst_ctrl |= val;
975 spin_unlock(&drvdata->spinlock);
978 static DEVICE_ATTR_RW(event_vinst);
980 static ssize_t s_exlevel_vinst_show(struct device *dev,
981 struct device_attribute *attr,
985 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
987 val = BMVAL(drvdata->vinst_ctrl, 16, 19);
988 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
991 static ssize_t s_exlevel_vinst_store(struct device *dev,
992 struct device_attribute *attr,
993 const char *buf, size_t size)
996 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
998 if (kstrtoul(buf, 16, &val))
1001 spin_lock(&drvdata->spinlock);
1002 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
1003 drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
1004 /* enable instruction tracing for corresponding exception level */
1005 val &= drvdata->s_ex_level;
1006 drvdata->vinst_ctrl |= (val << 16);
1007 spin_unlock(&drvdata->spinlock);
1010 static DEVICE_ATTR_RW(s_exlevel_vinst);
1012 static ssize_t ns_exlevel_vinst_show(struct device *dev,
1013 struct device_attribute *attr,
1017 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1019 /* EXLEVEL_NS, bits[23:20] */
1020 val = BMVAL(drvdata->vinst_ctrl, 20, 23);
1021 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1024 static ssize_t ns_exlevel_vinst_store(struct device *dev,
1025 struct device_attribute *attr,
1026 const char *buf, size_t size)
1029 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1031 if (kstrtoul(buf, 16, &val))
1034 spin_lock(&drvdata->spinlock);
1035 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
1036 drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
1037 /* enable instruction tracing for corresponding exception level */
1038 val &= drvdata->ns_ex_level;
1039 drvdata->vinst_ctrl |= (val << 20);
1040 spin_unlock(&drvdata->spinlock);
1043 static DEVICE_ATTR_RW(ns_exlevel_vinst);
1045 static ssize_t addr_idx_show(struct device *dev,
1046 struct device_attribute *attr,
1050 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1052 val = drvdata->addr_idx;
1053 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1056 static ssize_t addr_idx_store(struct device *dev,
1057 struct device_attribute *attr,
1058 const char *buf, size_t size)
1061 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1063 if (kstrtoul(buf, 16, &val))
1065 if (val >= drvdata->nr_addr_cmp * 2)
1069 * Use spinlock to ensure index doesn't change while it gets
1070 * dereferenced multiple times within a spinlock block elsewhere.
1072 spin_lock(&drvdata->spinlock);
1073 drvdata->addr_idx = val;
1074 spin_unlock(&drvdata->spinlock);
1077 static DEVICE_ATTR_RW(addr_idx);
1079 static ssize_t addr_instdatatype_show(struct device *dev,
1080 struct device_attribute *attr,
1085 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1087 spin_lock(&drvdata->spinlock);
1088 idx = drvdata->addr_idx;
1089 val = BMVAL(drvdata->addr_acc[idx], 0, 1);
1090 len = scnprintf(buf, PAGE_SIZE, "%s\n",
1091 val == ETM_INSTR_ADDR ? "instr" :
1092 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
1093 (val == ETM_DATA_STORE_ADDR ? "data_store" :
1094 "data_load_store")));
1095 spin_unlock(&drvdata->spinlock);
1099 static ssize_t addr_instdatatype_store(struct device *dev,
1100 struct device_attribute *attr,
1101 const char *buf, size_t size)
1105 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1107 if (strlen(buf) >= 20)
1109 if (sscanf(buf, "%s", str) != 1)
1112 spin_lock(&drvdata->spinlock);
1113 idx = drvdata->addr_idx;
1114 if (!strcmp(str, "instr"))
1115 /* TYPE, bits[1:0] */
1116 drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
1118 spin_unlock(&drvdata->spinlock);
1121 static DEVICE_ATTR_RW(addr_instdatatype);
1123 static ssize_t addr_single_show(struct device *dev,
1124 struct device_attribute *attr,
1129 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1131 idx = drvdata->addr_idx;
1132 spin_lock(&drvdata->spinlock);
1133 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1134 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1135 spin_unlock(&drvdata->spinlock);
1138 val = (unsigned long)drvdata->addr_val[idx];
1139 spin_unlock(&drvdata->spinlock);
1140 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1143 static ssize_t addr_single_store(struct device *dev,
1144 struct device_attribute *attr,
1145 const char *buf, size_t size)
1149 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1151 if (kstrtoul(buf, 16, &val))
1154 spin_lock(&drvdata->spinlock);
1155 idx = drvdata->addr_idx;
1156 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1157 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1158 spin_unlock(&drvdata->spinlock);
1162 drvdata->addr_val[idx] = (u64)val;
1163 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
1164 spin_unlock(&drvdata->spinlock);
1167 static DEVICE_ATTR_RW(addr_single);
1169 static ssize_t addr_range_show(struct device *dev,
1170 struct device_attribute *attr,
1174 unsigned long val1, val2;
1175 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1177 spin_lock(&drvdata->spinlock);
1178 idx = drvdata->addr_idx;
1180 spin_unlock(&drvdata->spinlock);
1183 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1184 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1185 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1186 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1187 spin_unlock(&drvdata->spinlock);
1191 val1 = (unsigned long)drvdata->addr_val[idx];
1192 val2 = (unsigned long)drvdata->addr_val[idx + 1];
1193 spin_unlock(&drvdata->spinlock);
1194 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1197 static ssize_t addr_range_store(struct device *dev,
1198 struct device_attribute *attr,
1199 const char *buf, size_t size)
1202 unsigned long val1, val2;
1203 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1205 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1207 /* lower address comparator cannot have a higher address value */
1211 spin_lock(&drvdata->spinlock);
1212 idx = drvdata->addr_idx;
1214 spin_unlock(&drvdata->spinlock);
1218 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1219 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1220 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1221 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1222 spin_unlock(&drvdata->spinlock);
1226 drvdata->addr_val[idx] = (u64)val1;
1227 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1228 drvdata->addr_val[idx + 1] = (u64)val2;
1229 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1231 * Program include or exclude control bits for vinst or vdata
1232 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1234 if (drvdata->mode & ETM_MODE_EXCLUDE)
1235 etm4_set_mode_exclude(drvdata, true);
1237 etm4_set_mode_exclude(drvdata, false);
1239 spin_unlock(&drvdata->spinlock);
1242 static DEVICE_ATTR_RW(addr_range);
1244 static ssize_t addr_start_show(struct device *dev,
1245 struct device_attribute *attr,
1250 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1252 spin_lock(&drvdata->spinlock);
1253 idx = drvdata->addr_idx;
1255 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1256 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1257 spin_unlock(&drvdata->spinlock);
1261 val = (unsigned long)drvdata->addr_val[idx];
1262 spin_unlock(&drvdata->spinlock);
1263 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1266 static ssize_t addr_start_store(struct device *dev,
1267 struct device_attribute *attr,
1268 const char *buf, size_t size)
1272 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1274 if (kstrtoul(buf, 16, &val))
1277 spin_lock(&drvdata->spinlock);
1278 idx = drvdata->addr_idx;
1279 if (!drvdata->nr_addr_cmp) {
1280 spin_unlock(&drvdata->spinlock);
1283 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1284 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1285 spin_unlock(&drvdata->spinlock);
1289 drvdata->addr_val[idx] = (u64)val;
1290 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
1291 drvdata->vissctlr |= BIT(idx);
1292 /* SSSTATUS, bit[9] - turn on start/stop logic */
1293 drvdata->vinst_ctrl |= BIT(9);
1294 spin_unlock(&drvdata->spinlock);
1297 static DEVICE_ATTR_RW(addr_start);
1299 static ssize_t addr_stop_show(struct device *dev,
1300 struct device_attribute *attr,
1305 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1307 spin_lock(&drvdata->spinlock);
1308 idx = drvdata->addr_idx;
1310 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1311 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1312 spin_unlock(&drvdata->spinlock);
1316 val = (unsigned long)drvdata->addr_val[idx];
1317 spin_unlock(&drvdata->spinlock);
1318 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1321 static ssize_t addr_stop_store(struct device *dev,
1322 struct device_attribute *attr,
1323 const char *buf, size_t size)
1327 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1329 if (kstrtoul(buf, 16, &val))
1332 spin_lock(&drvdata->spinlock);
1333 idx = drvdata->addr_idx;
1334 if (!drvdata->nr_addr_cmp) {
1335 spin_unlock(&drvdata->spinlock);
1338 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1339 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1340 spin_unlock(&drvdata->spinlock);
1344 drvdata->addr_val[idx] = (u64)val;
1345 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1346 drvdata->vissctlr |= BIT(idx + 16);
1347 /* SSSTATUS, bit[9] - turn on start/stop logic */
1348 drvdata->vinst_ctrl |= BIT(9);
1349 spin_unlock(&drvdata->spinlock);
1352 static DEVICE_ATTR_RW(addr_stop);
1354 static ssize_t addr_ctxtype_show(struct device *dev,
1355 struct device_attribute *attr,
1360 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1362 spin_lock(&drvdata->spinlock);
1363 idx = drvdata->addr_idx;
1364 /* CONTEXTTYPE, bits[3:2] */
1365 val = BMVAL(drvdata->addr_acc[idx], 2, 3);
1366 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1367 (val == ETM_CTX_CTXID ? "ctxid" :
1368 (val == ETM_CTX_VMID ? "vmid" : "all")));
1369 spin_unlock(&drvdata->spinlock);
1373 static ssize_t addr_ctxtype_store(struct device *dev,
1374 struct device_attribute *attr,
1375 const char *buf, size_t size)
1379 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381 if (strlen(buf) >= 10)
1383 if (sscanf(buf, "%s", str) != 1)
1386 spin_lock(&drvdata->spinlock);
1387 idx = drvdata->addr_idx;
1388 if (!strcmp(str, "none"))
1389 /* start by clearing context type bits */
1390 drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1391 else if (!strcmp(str, "ctxid")) {
1392 /* 0b01 The trace unit performs a Context ID */
1393 if (drvdata->numcidc) {
1394 drvdata->addr_acc[idx] |= BIT(2);
1395 drvdata->addr_acc[idx] &= ~BIT(3);
1397 } else if (!strcmp(str, "vmid")) {
1398 /* 0b10 The trace unit performs a VMID */
1399 if (drvdata->numvmidc) {
1400 drvdata->addr_acc[idx] &= ~BIT(2);
1401 drvdata->addr_acc[idx] |= BIT(3);
1403 } else if (!strcmp(str, "all")) {
1405 * 0b11 The trace unit performs a Context ID
1406 * comparison and a VMID
1408 if (drvdata->numcidc)
1409 drvdata->addr_acc[idx] |= BIT(2);
1410 if (drvdata->numvmidc)
1411 drvdata->addr_acc[idx] |= BIT(3);
1413 spin_unlock(&drvdata->spinlock);
1416 static DEVICE_ATTR_RW(addr_ctxtype);
1418 static ssize_t addr_context_show(struct device *dev,
1419 struct device_attribute *attr,
1424 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1426 spin_lock(&drvdata->spinlock);
1427 idx = drvdata->addr_idx;
1428 /* context ID comparator bits[6:4] */
1429 val = BMVAL(drvdata->addr_acc[idx], 4, 6);
1430 spin_unlock(&drvdata->spinlock);
1431 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1434 static ssize_t addr_context_store(struct device *dev,
1435 struct device_attribute *attr,
1436 const char *buf, size_t size)
1440 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1442 if (kstrtoul(buf, 16, &val))
1444 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1446 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1447 drvdata->numcidc : drvdata->numvmidc))
1450 spin_lock(&drvdata->spinlock);
1451 idx = drvdata->addr_idx;
1452 /* clear context ID comparator bits[6:4] */
1453 drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1454 drvdata->addr_acc[idx] |= (val << 4);
1455 spin_unlock(&drvdata->spinlock);
1458 static DEVICE_ATTR_RW(addr_context);
1460 static ssize_t seq_idx_show(struct device *dev,
1461 struct device_attribute *attr,
1465 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1467 val = drvdata->seq_idx;
1468 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1471 static ssize_t seq_idx_store(struct device *dev,
1472 struct device_attribute *attr,
1473 const char *buf, size_t size)
1476 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1478 if (kstrtoul(buf, 16, &val))
1480 if (val >= drvdata->nrseqstate - 1)
1484 * Use spinlock to ensure index doesn't change while it gets
1485 * dereferenced multiple times within a spinlock block elsewhere.
1487 spin_lock(&drvdata->spinlock);
1488 drvdata->seq_idx = val;
1489 spin_unlock(&drvdata->spinlock);
1492 static DEVICE_ATTR_RW(seq_idx);
1494 static ssize_t seq_state_show(struct device *dev,
1495 struct device_attribute *attr,
1499 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1501 val = drvdata->seq_state;
1502 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1505 static ssize_t seq_state_store(struct device *dev,
1506 struct device_attribute *attr,
1507 const char *buf, size_t size)
1510 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1512 if (kstrtoul(buf, 16, &val))
1514 if (val >= drvdata->nrseqstate)
1517 drvdata->seq_state = val;
1520 static DEVICE_ATTR_RW(seq_state);
1522 static ssize_t seq_event_show(struct device *dev,
1523 struct device_attribute *attr,
1528 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1530 spin_lock(&drvdata->spinlock);
1531 idx = drvdata->seq_idx;
1532 val = drvdata->seq_ctrl[idx];
1533 spin_unlock(&drvdata->spinlock);
1534 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1537 static ssize_t seq_event_store(struct device *dev,
1538 struct device_attribute *attr,
1539 const char *buf, size_t size)
1543 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1545 if (kstrtoul(buf, 16, &val))
1548 spin_lock(&drvdata->spinlock);
1549 idx = drvdata->seq_idx;
1550 /* RST, bits[7:0] */
1551 drvdata->seq_ctrl[idx] = val & 0xFF;
1552 spin_unlock(&drvdata->spinlock);
1555 static DEVICE_ATTR_RW(seq_event);
1557 static ssize_t seq_reset_event_show(struct device *dev,
1558 struct device_attribute *attr,
1562 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1564 val = drvdata->seq_rst;
1565 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1568 static ssize_t seq_reset_event_store(struct device *dev,
1569 struct device_attribute *attr,
1570 const char *buf, size_t size)
1573 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1575 if (kstrtoul(buf, 16, &val))
1577 if (!(drvdata->nrseqstate))
1580 drvdata->seq_rst = val & ETMv4_EVENT_MASK;
1583 static DEVICE_ATTR_RW(seq_reset_event);
1585 static ssize_t cntr_idx_show(struct device *dev,
1586 struct device_attribute *attr,
1590 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1592 val = drvdata->cntr_idx;
1593 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1596 static ssize_t cntr_idx_store(struct device *dev,
1597 struct device_attribute *attr,
1598 const char *buf, size_t size)
1601 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1603 if (kstrtoul(buf, 16, &val))
1605 if (val >= drvdata->nr_cntr)
1609 * Use spinlock to ensure index doesn't change while it gets
1610 * dereferenced multiple times within a spinlock block elsewhere.
1612 spin_lock(&drvdata->spinlock);
1613 drvdata->cntr_idx = val;
1614 spin_unlock(&drvdata->spinlock);
1617 static DEVICE_ATTR_RW(cntr_idx);
1619 static ssize_t cntrldvr_show(struct device *dev,
1620 struct device_attribute *attr,
1625 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1627 spin_lock(&drvdata->spinlock);
1628 idx = drvdata->cntr_idx;
1629 val = drvdata->cntrldvr[idx];
1630 spin_unlock(&drvdata->spinlock);
1631 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1634 static ssize_t cntrldvr_store(struct device *dev,
1635 struct device_attribute *attr,
1636 const char *buf, size_t size)
1640 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1642 if (kstrtoul(buf, 16, &val))
1644 if (val > ETM_CNTR_MAX_VAL)
1647 spin_lock(&drvdata->spinlock);
1648 idx = drvdata->cntr_idx;
1649 drvdata->cntrldvr[idx] = val;
1650 spin_unlock(&drvdata->spinlock);
1653 static DEVICE_ATTR_RW(cntrldvr);
1655 static ssize_t cntr_val_show(struct device *dev,
1656 struct device_attribute *attr,
1661 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1663 spin_lock(&drvdata->spinlock);
1664 idx = drvdata->cntr_idx;
1665 val = drvdata->cntr_val[idx];
1666 spin_unlock(&drvdata->spinlock);
1667 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1670 static ssize_t cntr_val_store(struct device *dev,
1671 struct device_attribute *attr,
1672 const char *buf, size_t size)
1676 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1678 if (kstrtoul(buf, 16, &val))
1680 if (val > ETM_CNTR_MAX_VAL)
1683 spin_lock(&drvdata->spinlock);
1684 idx = drvdata->cntr_idx;
1685 drvdata->cntr_val[idx] = val;
1686 spin_unlock(&drvdata->spinlock);
1689 static DEVICE_ATTR_RW(cntr_val);
1691 static ssize_t cntr_ctrl_show(struct device *dev,
1692 struct device_attribute *attr,
1697 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1699 spin_lock(&drvdata->spinlock);
1700 idx = drvdata->cntr_idx;
1701 val = drvdata->cntr_ctrl[idx];
1702 spin_unlock(&drvdata->spinlock);
1703 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1706 static ssize_t cntr_ctrl_store(struct device *dev,
1707 struct device_attribute *attr,
1708 const char *buf, size_t size)
1712 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1714 if (kstrtoul(buf, 16, &val))
1717 spin_lock(&drvdata->spinlock);
1718 idx = drvdata->cntr_idx;
1719 drvdata->cntr_ctrl[idx] = val;
1720 spin_unlock(&drvdata->spinlock);
1723 static DEVICE_ATTR_RW(cntr_ctrl);
1725 static ssize_t res_idx_show(struct device *dev,
1726 struct device_attribute *attr,
1730 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1732 val = drvdata->res_idx;
1733 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1736 static ssize_t res_idx_store(struct device *dev,
1737 struct device_attribute *attr,
1738 const char *buf, size_t size)
1741 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1743 if (kstrtoul(buf, 16, &val))
1745 /* Resource selector pair 0 is always implemented and reserved */
1746 if (val < 2 || val >= drvdata->nr_resource * 2)
1750 * Use spinlock to ensure index doesn't change while it gets
1751 * dereferenced multiple times within a spinlock block elsewhere.
1753 spin_lock(&drvdata->spinlock);
1754 drvdata->res_idx = val;
1755 spin_unlock(&drvdata->spinlock);
1758 static DEVICE_ATTR_RW(res_idx);
1760 static ssize_t res_ctrl_show(struct device *dev,
1761 struct device_attribute *attr,
1766 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1768 spin_lock(&drvdata->spinlock);
1769 idx = drvdata->res_idx;
1770 val = drvdata->res_ctrl[idx];
1771 spin_unlock(&drvdata->spinlock);
1772 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1775 static ssize_t res_ctrl_store(struct device *dev,
1776 struct device_attribute *attr,
1777 const char *buf, size_t size)
1781 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1783 if (kstrtoul(buf, 16, &val))
1786 spin_lock(&drvdata->spinlock);
1787 idx = drvdata->res_idx;
1788 /* For odd idx pair inversal bit is RES0 */
1790 /* PAIRINV, bit[21] */
1792 drvdata->res_ctrl[idx] = val;
1793 spin_unlock(&drvdata->spinlock);
1796 static DEVICE_ATTR_RW(res_ctrl);
1798 static ssize_t ctxid_idx_show(struct device *dev,
1799 struct device_attribute *attr,
1803 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1805 val = drvdata->ctxid_idx;
1806 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1809 static ssize_t ctxid_idx_store(struct device *dev,
1810 struct device_attribute *attr,
1811 const char *buf, size_t size)
1814 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1816 if (kstrtoul(buf, 16, &val))
1818 if (val >= drvdata->numcidc)
1822 * Use spinlock to ensure index doesn't change while it gets
1823 * dereferenced multiple times within a spinlock block elsewhere.
1825 spin_lock(&drvdata->spinlock);
1826 drvdata->ctxid_idx = val;
1827 spin_unlock(&drvdata->spinlock);
1830 static DEVICE_ATTR_RW(ctxid_idx);
1832 static ssize_t ctxid_pid_show(struct device *dev,
1833 struct device_attribute *attr,
1838 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1840 spin_lock(&drvdata->spinlock);
1841 idx = drvdata->ctxid_idx;
1842 val = (unsigned long)drvdata->ctxid_vpid[idx];
1843 spin_unlock(&drvdata->spinlock);
1844 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1847 static ssize_t ctxid_pid_store(struct device *dev,
1848 struct device_attribute *attr,
1849 const char *buf, size_t size)
1852 unsigned long vpid, pid;
1853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1856 * only implemented when ctxid tracing is enabled, i.e. at least one
1857 * ctxid comparator is implemented and ctxid is greater than 0 bits
1860 if (!drvdata->ctxid_size || !drvdata->numcidc)
1862 if (kstrtoul(buf, 16, &vpid))
1865 pid = coresight_vpid_to_pid(vpid);
1867 spin_lock(&drvdata->spinlock);
1868 idx = drvdata->ctxid_idx;
1869 drvdata->ctxid_pid[idx] = (u64)pid;
1870 drvdata->ctxid_vpid[idx] = (u64)vpid;
1871 spin_unlock(&drvdata->spinlock);
1874 static DEVICE_ATTR_RW(ctxid_pid);
1876 static ssize_t ctxid_masks_show(struct device *dev,
1877 struct device_attribute *attr,
1880 unsigned long val1, val2;
1881 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1883 spin_lock(&drvdata->spinlock);
1884 val1 = drvdata->ctxid_mask0;
1885 val2 = drvdata->ctxid_mask1;
1886 spin_unlock(&drvdata->spinlock);
1887 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1890 static ssize_t ctxid_masks_store(struct device *dev,
1891 struct device_attribute *attr,
1892 const char *buf, size_t size)
1895 unsigned long val1, val2, mask;
1896 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1899 * only implemented when ctxid tracing is enabled, i.e. at least one
1900 * ctxid comparator is implemented and ctxid is greater than 0 bits
1903 if (!drvdata->ctxid_size || !drvdata->numcidc)
1905 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1908 spin_lock(&drvdata->spinlock);
1910 * each byte[0..3] controls mask value applied to ctxid
1913 switch (drvdata->numcidc) {
1915 /* COMP0, bits[7:0] */
1916 drvdata->ctxid_mask0 = val1 & 0xFF;
1919 /* COMP1, bits[15:8] */
1920 drvdata->ctxid_mask0 = val1 & 0xFFFF;
1923 /* COMP2, bits[23:16] */
1924 drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
1927 /* COMP3, bits[31:24] */
1928 drvdata->ctxid_mask0 = val1;
1931 /* COMP4, bits[7:0] */
1932 drvdata->ctxid_mask0 = val1;
1933 drvdata->ctxid_mask1 = val2 & 0xFF;
1936 /* COMP5, bits[15:8] */
1937 drvdata->ctxid_mask0 = val1;
1938 drvdata->ctxid_mask1 = val2 & 0xFFFF;
1941 /* COMP6, bits[23:16] */
1942 drvdata->ctxid_mask0 = val1;
1943 drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
1946 /* COMP7, bits[31:24] */
1947 drvdata->ctxid_mask0 = val1;
1948 drvdata->ctxid_mask1 = val2;
1954 * If software sets a mask bit to 1, it must program relevant byte
1955 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1956 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1957 * of ctxid comparator0 value (corresponding to byte 0) register.
1959 mask = drvdata->ctxid_mask0;
1960 for (i = 0; i < drvdata->numcidc; i++) {
1961 /* mask value of corresponding ctxid comparator */
1962 maskbyte = mask & ETMv4_EVENT_MASK;
1964 * each bit corresponds to a byte of respective ctxid comparator
1967 for (j = 0; j < 8; j++) {
1969 drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
1972 /* Select the next ctxid comparator mask value */
1974 /* ctxid comparators[4-7] */
1975 mask = drvdata->ctxid_mask1;
1980 spin_unlock(&drvdata->spinlock);
1983 static DEVICE_ATTR_RW(ctxid_masks);
1985 static ssize_t vmid_idx_show(struct device *dev,
1986 struct device_attribute *attr,
1990 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1992 val = drvdata->vmid_idx;
1993 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1996 static ssize_t vmid_idx_store(struct device *dev,
1997 struct device_attribute *attr,
1998 const char *buf, size_t size)
2001 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2003 if (kstrtoul(buf, 16, &val))
2005 if (val >= drvdata->numvmidc)
2009 * Use spinlock to ensure index doesn't change while it gets
2010 * dereferenced multiple times within a spinlock block elsewhere.
2012 spin_lock(&drvdata->spinlock);
2013 drvdata->vmid_idx = val;
2014 spin_unlock(&drvdata->spinlock);
2017 static DEVICE_ATTR_RW(vmid_idx);
2019 static ssize_t vmid_val_show(struct device *dev,
2020 struct device_attribute *attr,
2024 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2026 val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
2027 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2030 static ssize_t vmid_val_store(struct device *dev,
2031 struct device_attribute *attr,
2032 const char *buf, size_t size)
2035 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2038 * only implemented when vmid tracing is enabled, i.e. at least one
2039 * vmid comparator is implemented and at least 8 bit vmid size
2041 if (!drvdata->vmid_size || !drvdata->numvmidc)
2043 if (kstrtoul(buf, 16, &val))
2046 spin_lock(&drvdata->spinlock);
2047 drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
2048 spin_unlock(&drvdata->spinlock);
2051 static DEVICE_ATTR_RW(vmid_val);
2053 static ssize_t vmid_masks_show(struct device *dev,
2054 struct device_attribute *attr, char *buf)
2056 unsigned long val1, val2;
2057 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2059 spin_lock(&drvdata->spinlock);
2060 val1 = drvdata->vmid_mask0;
2061 val2 = drvdata->vmid_mask1;
2062 spin_unlock(&drvdata->spinlock);
2063 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2066 static ssize_t vmid_masks_store(struct device *dev,
2067 struct device_attribute *attr,
2068 const char *buf, size_t size)
2071 unsigned long val1, val2, mask;
2072 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2074 * only implemented when vmid tracing is enabled, i.e. at least one
2075 * vmid comparator is implemented and at least 8 bit vmid size
2077 if (!drvdata->vmid_size || !drvdata->numvmidc)
2079 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2082 spin_lock(&drvdata->spinlock);
2085 * each byte[0..3] controls mask value applied to vmid
2088 switch (drvdata->numvmidc) {
2090 /* COMP0, bits[7:0] */
2091 drvdata->vmid_mask0 = val1 & 0xFF;
2094 /* COMP1, bits[15:8] */
2095 drvdata->vmid_mask0 = val1 & 0xFFFF;
2098 /* COMP2, bits[23:16] */
2099 drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2102 /* COMP3, bits[31:24] */
2103 drvdata->vmid_mask0 = val1;
2106 /* COMP4, bits[7:0] */
2107 drvdata->vmid_mask0 = val1;
2108 drvdata->vmid_mask1 = val2 & 0xFF;
2111 /* COMP5, bits[15:8] */
2112 drvdata->vmid_mask0 = val1;
2113 drvdata->vmid_mask1 = val2 & 0xFFFF;
2116 /* COMP6, bits[23:16] */
2117 drvdata->vmid_mask0 = val1;
2118 drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2121 /* COMP7, bits[31:24] */
2122 drvdata->vmid_mask0 = val1;
2123 drvdata->vmid_mask1 = val2;
2130 * If software sets a mask bit to 1, it must program relevant byte
2131 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2132 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2133 * of vmid comparator0 value (corresponding to byte 0) register.
2135 mask = drvdata->vmid_mask0;
2136 for (i = 0; i < drvdata->numvmidc; i++) {
2137 /* mask value of corresponding vmid comparator */
2138 maskbyte = mask & ETMv4_EVENT_MASK;
2140 * each bit corresponds to a byte of respective vmid comparator
2143 for (j = 0; j < 8; j++) {
2145 drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
2148 /* Select the next vmid comparator mask value */
2150 /* vmid comparators[4-7] */
2151 mask = drvdata->vmid_mask1;
2155 spin_unlock(&drvdata->spinlock);
2158 static DEVICE_ATTR_RW(vmid_masks);
2160 static ssize_t cpu_show(struct device *dev,
2161 struct device_attribute *attr, char *buf)
2164 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2167 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2170 static DEVICE_ATTR_RO(cpu);
2172 static struct attribute *coresight_etmv4_attrs[] = {
2173 &dev_attr_nr_pe_cmp.attr,
2174 &dev_attr_nr_addr_cmp.attr,
2175 &dev_attr_nr_cntr.attr,
2176 &dev_attr_nr_ext_inp.attr,
2177 &dev_attr_numcidc.attr,
2178 &dev_attr_numvmidc.attr,
2179 &dev_attr_nrseqstate.attr,
2180 &dev_attr_nr_resource.attr,
2181 &dev_attr_nr_ss_cmp.attr,
2182 &dev_attr_reset.attr,
2183 &dev_attr_mode.attr,
2185 &dev_attr_event.attr,
2186 &dev_attr_event_instren.attr,
2187 &dev_attr_event_ts.attr,
2188 &dev_attr_syncfreq.attr,
2189 &dev_attr_cyc_threshold.attr,
2190 &dev_attr_bb_ctrl.attr,
2191 &dev_attr_event_vinst.attr,
2192 &dev_attr_s_exlevel_vinst.attr,
2193 &dev_attr_ns_exlevel_vinst.attr,
2194 &dev_attr_addr_idx.attr,
2195 &dev_attr_addr_instdatatype.attr,
2196 &dev_attr_addr_single.attr,
2197 &dev_attr_addr_range.attr,
2198 &dev_attr_addr_start.attr,
2199 &dev_attr_addr_stop.attr,
2200 &dev_attr_addr_ctxtype.attr,
2201 &dev_attr_addr_context.attr,
2202 &dev_attr_seq_idx.attr,
2203 &dev_attr_seq_state.attr,
2204 &dev_attr_seq_event.attr,
2205 &dev_attr_seq_reset_event.attr,
2206 &dev_attr_cntr_idx.attr,
2207 &dev_attr_cntrldvr.attr,
2208 &dev_attr_cntr_val.attr,
2209 &dev_attr_cntr_ctrl.attr,
2210 &dev_attr_res_idx.attr,
2211 &dev_attr_res_ctrl.attr,
2212 &dev_attr_ctxid_idx.attr,
2213 &dev_attr_ctxid_pid.attr,
2214 &dev_attr_ctxid_masks.attr,
2215 &dev_attr_vmid_idx.attr,
2216 &dev_attr_vmid_val.attr,
2217 &dev_attr_vmid_masks.attr,
/*
 * coresight_simple_func - generate a read-only sysfs show routine that
 * dumps the raw 32-bit value of a memory-mapped trace register at @offset.
 */
#define coresight_simple_func(name, offset)				\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);	\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
static DEVICE_ATTR_RO(name)
2232 coresight_simple_func(trcoslsr, TRCOSLSR);
2233 coresight_simple_func(trcpdcr, TRCPDCR);
2234 coresight_simple_func(trcpdsr, TRCPDSR);
2235 coresight_simple_func(trclsr, TRCLSR);
2236 coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
2237 coresight_simple_func(trcdevid, TRCDEVID);
2238 coresight_simple_func(trcdevtype, TRCDEVTYPE);
2239 coresight_simple_func(trcpidr0, TRCPIDR0);
2240 coresight_simple_func(trcpidr1, TRCPIDR1);
2241 coresight_simple_func(trcpidr2, TRCPIDR2);
2242 coresight_simple_func(trcpidr3, TRCPIDR3);
2244 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2245 &dev_attr_trcoslsr.attr,
2246 &dev_attr_trcpdcr.attr,
2247 &dev_attr_trcpdsr.attr,
2248 &dev_attr_trclsr.attr,
2249 &dev_attr_trcauthstatus.attr,
2250 &dev_attr_trcdevid.attr,
2251 &dev_attr_trcdevtype.attr,
2252 &dev_attr_trcpidr0.attr,
2253 &dev_attr_trcpidr1.attr,
2254 &dev_attr_trcpidr2.attr,
2255 &dev_attr_trcpidr3.attr,
2259 coresight_simple_func(trcidr0, TRCIDR0);
2260 coresight_simple_func(trcidr1, TRCIDR1);
2261 coresight_simple_func(trcidr2, TRCIDR2);
2262 coresight_simple_func(trcidr3, TRCIDR3);
2263 coresight_simple_func(trcidr4, TRCIDR4);
2264 coresight_simple_func(trcidr5, TRCIDR5);
2265 /* trcidr[6,7] are reserved */
2266 coresight_simple_func(trcidr8, TRCIDR8);
2267 coresight_simple_func(trcidr9, TRCIDR9);
2268 coresight_simple_func(trcidr10, TRCIDR10);
2269 coresight_simple_func(trcidr11, TRCIDR11);
2270 coresight_simple_func(trcidr12, TRCIDR12);
2271 coresight_simple_func(trcidr13, TRCIDR13);
2273 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2274 &dev_attr_trcidr0.attr,
2275 &dev_attr_trcidr1.attr,
2276 &dev_attr_trcidr2.attr,
2277 &dev_attr_trcidr3.attr,
2278 &dev_attr_trcidr4.attr,
2279 &dev_attr_trcidr5.attr,
2280 /* trcidr[6,7] are reserved */
2281 &dev_attr_trcidr8.attr,
2282 &dev_attr_trcidr9.attr,
2283 &dev_attr_trcidr10.attr,
2284 &dev_attr_trcidr11.attr,
2285 &dev_attr_trcidr12.attr,
2286 &dev_attr_trcidr13.attr,
2290 static const struct attribute_group coresight_etmv4_group = {
2291 .attrs = coresight_etmv4_attrs,
2294 static const struct attribute_group coresight_etmv4_mgmt_group = {
2295 .attrs = coresight_etmv4_mgmt_attrs,
2299 static const struct attribute_group coresight_etmv4_trcidr_group = {
2300 .attrs = coresight_etmv4_trcidr_attrs,
2304 static const struct attribute_group *coresight_etmv4_groups[] = {
2305 &coresight_etmv4_group,
2306 &coresight_etmv4_mgmt_group,
2307 &coresight_etmv4_trcidr_group,
2311 static void etm4_init_arch_data(void *info)
2319 struct etmv4_drvdata *drvdata = info;
2321 CS_UNLOCK(drvdata->base);
2323 /* find all capabilities of the tracing unit */
2324 etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
2326 /* INSTP0, bits[2:1] P0 tracing support field */
2327 if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
2328 drvdata->instrp0 = true;
2330 drvdata->instrp0 = false;
2332 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2333 if (BMVAL(etmidr0, 5, 5))
2334 drvdata->trcbb = true;
2336 drvdata->trcbb = false;
2338 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2339 if (BMVAL(etmidr0, 6, 6))
2340 drvdata->trccond = true;
2342 drvdata->trccond = false;
2344 /* TRCCCI, bit[7] Cycle counting instruction bit */
2345 if (BMVAL(etmidr0, 7, 7))
2346 drvdata->trccci = true;
2348 drvdata->trccci = false;
2350 /* RETSTACK, bit[9] Return stack bit */
2351 if (BMVAL(etmidr0, 9, 9))
2352 drvdata->retstack = true;
2354 drvdata->retstack = false;
2356 /* NUMEVENT, bits[11:10] Number of events field */
2357 drvdata->nr_event = BMVAL(etmidr0, 10, 11);
2358 /* QSUPP, bits[16:15] Q element support field */
2359 drvdata->q_support = BMVAL(etmidr0, 15, 16);
2360 /* TSSIZE, bits[28:24] Global timestamp size field */
2361 drvdata->ts_size = BMVAL(etmidr0, 24, 28);
2363 /* base architecture of trace unit */
2364 etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
2366 * TRCARCHMIN, bits[7:4] architecture the minor version number
2367 * TRCARCHMAJ, bits[11:8] architecture major versin number
2369 drvdata->arch = BMVAL(etmidr1, 4, 11);
2371 /* maximum size of resources */
2372 etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
2373 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2374 drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
2375 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2376 drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
2377 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2378 drvdata->ccsize = BMVAL(etmidr2, 25, 28);
2380 etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
2381 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2382 drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
2383 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2384 drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
2385 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2386 drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
2389 * TRCERR, bit[24] whether a trace unit can trace a
2390 * system error exception.
2392 if (BMVAL(etmidr3, 24, 24))
2393 drvdata->trc_error = true;
2395 drvdata->trc_error = false;
2397 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2398 if (BMVAL(etmidr3, 25, 25))
2399 drvdata->syncpr = true;
2401 drvdata->syncpr = false;
2403 /* STALLCTL, bit[26] is stall control implemented? */
2404 if (BMVAL(etmidr3, 26, 26))
2405 drvdata->stallctl = true;
2407 drvdata->stallctl = false;
2409 /* SYSSTALL, bit[27] implementation can support stall control? */
2410 if (BMVAL(etmidr3, 27, 27))
2411 drvdata->sysstall = true;
2413 drvdata->sysstall = false;
2415 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2416 drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
2418 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2419 if (BMVAL(etmidr3, 31, 31))
2420 drvdata->nooverflow = true;
2422 drvdata->nooverflow = false;
2424 /* number of resources trace unit supports */
2425 etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
2426 /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
2427 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2428 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2429 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2431 * NUMRSPAIR, bits[19:16]
2432 * The number of resource pairs conveyed by the HW starts at 0, i.e a
2433 * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
2434 * As such add 1 to the value of NUMRSPAIR for a better representation.
2436 drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
2438 * NUMSSCC, bits[23:20] the number of single-shot
2439 * comparator control for tracing
2441 drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
2442 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2443 drvdata->numcidc = BMVAL(etmidr4, 24, 27);
2444 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2445 drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
2447 etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
2448 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2449 drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
2450 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2451 drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
2452 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2453 if (BMVAL(etmidr5, 22, 22))
2454 drvdata->atbtrig = true;
2456 drvdata->atbtrig = false;
2458 * LPOVERRIDE, bit[23] implementation supports
2459 * low-power state override
2461 if (BMVAL(etmidr5, 23, 23))
2462 drvdata->lpoverride = true;
2464 drvdata->lpoverride = false;
2465 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2466 drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
2467 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2468 drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
2469 CS_LOCK(drvdata->base);
2472 static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
2476 drvdata->pe_sel = 0x0;
2477 drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
2478 ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
2480 /* disable all events tracing */
2481 drvdata->eventctrl0 = 0x0;
2482 drvdata->eventctrl1 = 0x0;
2484 /* disable stalling */
2485 drvdata->stall_ctrl = 0x0;
2487 /* disable timestamp event */
2488 drvdata->ts_ctrl = 0x0;
2490 /* enable trace synchronization every 4096 bytes for trace */
2491 if (drvdata->syncpr == false)
2492 drvdata->syncfreq = 0xC;
2495 * enable viewInst to trace everything with start-stop logic in
2498 drvdata->vinst_ctrl |= BIT(0);
2499 /* set initial state of start-stop logic */
2500 if (drvdata->nr_addr_cmp)
2501 drvdata->vinst_ctrl |= BIT(9);
2503 /* no address range filtering for ViewInst */
2504 drvdata->viiectlr = 0x0;
2505 /* no start-stop filtering for ViewInst */
2506 drvdata->vissctlr = 0x0;
2508 /* disable seq events */
2509 for (i = 0; i < drvdata->nrseqstate-1; i++)
2510 drvdata->seq_ctrl[i] = 0x0;
2511 drvdata->seq_rst = 0x0;
2512 drvdata->seq_state = 0x0;
2514 /* disable external input events */
2515 drvdata->ext_inp = 0x0;
2517 for (i = 0; i < drvdata->nr_cntr; i++) {
2518 drvdata->cntrldvr[i] = 0x0;
2519 drvdata->cntr_ctrl[i] = 0x0;
2520 drvdata->cntr_val[i] = 0x0;
2523 /* Resource selector pair 0 is always implemented and reserved */
2524 drvdata->res_idx = 0x2;
2525 for (i = 2; i < drvdata->nr_resource * 2; i++)
2526 drvdata->res_ctrl[i] = 0x0;
2528 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
2529 drvdata->ss_ctrl[i] = 0x0;
2530 drvdata->ss_pe_cmp[i] = 0x0;
2533 if (drvdata->nr_addr_cmp >= 1) {
2534 drvdata->addr_val[0] = (unsigned long)_stext;
2535 drvdata->addr_val[1] = (unsigned long)_etext;
2536 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
2537 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
2540 for (i = 0; i < drvdata->numcidc; i++) {
2541 drvdata->ctxid_pid[i] = 0x0;
2542 drvdata->ctxid_vpid[i] = 0x0;
2545 drvdata->ctxid_mask0 = 0x0;
2546 drvdata->ctxid_mask1 = 0x0;
2548 for (i = 0; i < drvdata->numvmidc; i++)
2549 drvdata->vmid_val[i] = 0x0;
2550 drvdata->vmid_mask0 = 0x0;
2551 drvdata->vmid_mask1 = 0x0;
2554 * A trace ID value of 0 is invalid, so let's start at some
2555 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
2558 drvdata->trcid = 0x20 + drvdata->cpu;
2561 static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2564 unsigned int cpu = (unsigned long)hcpu;
2566 if (!etmdrvdata[cpu])
2569 switch (action & (~CPU_TASKS_FROZEN)) {
2571 spin_lock(&etmdrvdata[cpu]->spinlock);
2572 if (!etmdrvdata[cpu]->os_unlock) {
2573 etm4_os_unlock(etmdrvdata[cpu]);
2574 etmdrvdata[cpu]->os_unlock = true;
2577 if (etmdrvdata[cpu]->enable)
2578 etm4_enable_hw(etmdrvdata[cpu]);
2579 spin_unlock(&etmdrvdata[cpu]->spinlock);
2583 if (etmdrvdata[cpu]->boot_enable &&
2584 !etmdrvdata[cpu]->sticky_enable)
2585 coresight_enable(etmdrvdata[cpu]->csdev);
2589 spin_lock(&etmdrvdata[cpu]->spinlock);
2590 if (etmdrvdata[cpu]->enable)
2591 etm4_disable_hw(etmdrvdata[cpu]);
2592 spin_unlock(&etmdrvdata[cpu]->spinlock);
2599 static struct notifier_block etm4_cpu_notifier = {
2600 .notifier_call = etm4_cpu_callback,
2603 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2607 struct device *dev = &adev->dev;
2608 struct coresight_platform_data *pdata = NULL;
2609 struct etmv4_drvdata *drvdata;
2610 struct resource *res = &adev->res;
2611 struct coresight_desc *desc;
2612 struct device_node *np = adev->dev.of_node;
2614 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
2618 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
2623 pdata = of_get_coresight_platform_data(dev, np);
2625 return PTR_ERR(pdata);
2626 adev->dev.platform_data = pdata;
2629 drvdata->dev = &adev->dev;
2630 dev_set_drvdata(dev, drvdata);
2632 /* Validity for the resource is already checked by the AMBA core */
2633 base = devm_ioremap_resource(dev, res);
2635 return PTR_ERR(base);
2637 drvdata->base = base;
2639 spin_lock_init(&drvdata->spinlock);
2641 drvdata->cpu = pdata ? pdata->cpu : 0;
2644 etmdrvdata[drvdata->cpu] = drvdata;
2646 if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
2647 drvdata->os_unlock = true;
2649 if (smp_call_function_single(drvdata->cpu,
2650 etm4_init_arch_data, drvdata, 1))
2651 dev_err(dev, "ETM arch init failed\n");
2654 register_hotcpu_notifier(&etm4_cpu_notifier);
2658 if (etm4_arch_supported(drvdata->arch) == false) {
2660 goto err_arch_supported;
2662 etm4_init_default_data(drvdata);
2664 pm_runtime_put(&adev->dev);
2666 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
2667 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
2668 desc->ops = &etm4_cs_ops;
2669 desc->pdata = pdata;
2671 desc->groups = coresight_etmv4_groups;
2672 drvdata->csdev = coresight_register(desc);
2673 if (IS_ERR(drvdata->csdev)) {
2674 ret = PTR_ERR(drvdata->csdev);
2675 goto err_coresight_register;
2678 dev_info(dev, "%s initialized\n", (char *)id->data);
2681 coresight_enable(drvdata->csdev);
2682 drvdata->boot_enable = true;
2688 pm_runtime_put(&adev->dev);
2689 err_coresight_register:
2690 if (--etm4_count == 0)
2691 unregister_hotcpu_notifier(&etm4_cpu_notifier);
2695 static struct amba_id etm4_ids[] = {
2696 { /* ETM 4.0 - Qualcomm */
2701 { /* ETM 4.0 - Juno board */
2709 static struct amba_driver etm4x_driver = {
2711 .name = "coresight-etm4x",
2712 .suppress_bind_attrs = true,
2714 .probe = etm4_probe,
2715 .id_table = etm4_ids,
2718 module_amba_driver(etm4x_driver);