/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

/**
 * struct cs_etr_buffers - keep track of a recording session's specifics
 * @tmc:        generic portion of the TMC buffers
 * @paddr:      the physical address of a DMA'able contiguous memory area
 * @vaddr:      the virtual address associated to @paddr
 * @size:       how much memory we have, starting at @paddr
 * @dev:        the device @vaddr has been tied to
 */
struct cs_etr_buffers {
        struct cs_buffers       tmc;
        dma_addr_t              paddr;
        void __iomem            *vaddr;
        u32                     size;
        struct device           *dev;
};

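/*
 * tmc_etr_enable_hw - configure and start the ETR.
 *
 * Zero the trace buffer, then program the capture: RAM size (in 32-bit
 * words), circular buffer mode, the AXI control register (write bursts of
 * up to 16, scatter-gather mode off), the DMA buffer base address, the
 * formatter/flush control and the trigger counter, and finally enable the
 * TMC.
 */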
void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
        u32 axictl;

        /* Zero out the memory to help with debug */
        memset(drvdata->vaddr, 0, drvdata->size);

        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

        axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
        axictl |= TMC_AXICTL_WR_BURST_16;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
        axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
        axictl = (axictl &
                  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
                  TMC_AXICTL_PROT_CTL_B1;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

        writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
        writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
        u32 rwp, val;

        rwp = readl_relaxed(drvdata->base + TMC_RWP);
        val = readl_relaxed(drvdata->base + TMC_STS);

        /*
         * If the buffer has wrapped around (Full bit set), the oldest data
         * starts at the write pointer; otherwise it starts at the base of
         * the buffer.
         */
        if (val & BIT(0))
                drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
        else
                drvdata->buf = drvdata->vaddr;
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
                tmc_etr_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

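/*
 * tmc_enable_etr_sink_sysfs - enable the ETR from the sysFS interface.
 *
 * A contiguous DMA buffer is allocated with the spinlock released and handed
 * to the ETR, unless a previous session left one behind.  If the sink is
 * already enabled in sysFS mode the new user simply piggybacks on it.
 */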
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
{
        int ret = 0;
        bool used = false;
        long val;
        unsigned long flags;
        void __iomem *vaddr = NULL;
        dma_addr_t paddr;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /* This shouldn't be happening */
        if (WARN_ON(mode != CS_MODE_SYSFS))
                return -EINVAL;

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->vaddr) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /*
                 * Contiguous memory can't be allocated while a spinlock is
                 * held.  As such allocate memory here and free it if a buffer
                 * has already been allocated (from a previous session).
                 */
                vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
                                           &paddr, GFP_KERNEL);
                if (!vaddr)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        val = local_xchg(&drvdata->mode, mode);
        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (val == CS_MODE_SYSFS)
                goto out;

        /*
         * If drvdata::buf == NULL, use the memory allocated above.
         * Otherwise a buffer still exists from a previous session, so
         * simply use that.
         */
        if (drvdata->buf == NULL) {
                used = true;
                drvdata->vaddr = vaddr;
                drvdata->paddr = paddr;
                drvdata->buf = drvdata->vaddr;
        }

        memset(drvdata->vaddr, 0, drvdata->size);

        tmc_etr_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used && vaddr)
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        if (!ret)
                dev_info(drvdata->dev, "TMC-ETR enabled\n");

        return ret;
}

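/*
 * tmc_enable_etr_sink_perf - enable the ETR on behalf of the perf subsystem.
 *
 * Unlike the sysFS path only one writer is allowed, so the ETR must be
 * disabled (and not being read) before it can be claimed for a perf session.
 */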
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
{
        int ret = 0;
        long val;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /* This shouldn't be happening */
        if (WARN_ON(mode != CS_MODE_PERF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EINVAL;
                goto out;
        }

        val = local_xchg(&drvdata->mode, mode);
        /*
         * In Perf mode there can be only one writer per sink.  There
         * is also no need to continue if the ETR is already operated
         * from sysFS.
         */
        if (val != CS_MODE_DISABLED) {
                ret = -EINVAL;
                goto out;
        }

        tmc_etr_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
        switch (mode) {
        case CS_MODE_SYSFS:
                return tmc_enable_etr_sink_sysfs(csdev, mode);
        case CS_MODE_PERF:
                return tmc_enable_etr_sink_perf(csdev, mode);
        }

        /* We shouldn't be here */
        return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
        long val;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
        /* Disable the TMC only if it was actually enabled */
        if (val != CS_MODE_DISABLED)
                tmc_etr_disable_hw(drvdata);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

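/*
 * tmc_alloc_etr_buffer - allocate the per-event buffers used by perf.
 *
 * The cs_etr_buffers structure is allocated on the node of the CPU doing the
 * tracing and carries both the coherent DMA area handed to the ETR and the
 * description of the perf ring buffer pages the trace data is copied to.
 */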
static void *tmc_alloc_etr_buffer(struct coresight_device *csdev, int cpu,
                                  void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_etr_buffers *buf;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_etr_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->dev = drvdata->dev;
        buf->size = drvdata->size;
        buf->vaddr = dma_alloc_coherent(buf->dev, buf->size,
                                        &buf->paddr, GFP_KERNEL);
        if (!buf->vaddr) {
                kfree(buf);
                return NULL;
        }

        buf->tmc.snapshot = overwrite;
        buf->tmc.nr_pages = nr_pages;
        buf->tmc.data_pages = pages;

        return buf;
}

static void tmc_free_etr_buffer(void *config)
{
        struct cs_etr_buffers *buf = config;

        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->paddr);
        kfree(buf);
}

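/*
 * tmc_set_etr_buffer - prepare the buffers for a perf session.
 *
 * Translate the ring buffer head into a page index and an offset within that
 * page, clear the running data count and point the ETR at the DMA area
 * allocated in tmc_alloc_etr_buffer().
 */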
static int tmc_set_etr_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        int ret = 0;
        unsigned long head;
        struct cs_etr_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->tmc.nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->tmc.cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->tmc.offset = head % PAGE_SIZE;

        local_set(&buf->tmc.data_size, 0);

        /* Tell the HW where to put the trace data */
        drvdata->vaddr = buf->vaddr;
        drvdata->paddr = buf->paddr;
        memset(drvdata->vaddr, 0, drvdata->size);

        return ret;
}

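/*
 * tmc_reset_etr_buffer - finish a perf run.
 *
 * Report the amount of data collected (or the new head in snapshot mode) and
 * whether any trace was lost, then detach the DMA area from the ETR so the
 * next run starts from a clean slate.
 */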
static unsigned long tmc_reset_etr_buffer(struct coresight_device *csdev,
                                          struct perf_output_handle *handle,
                                          void *sink_config, bool *lost)
{
        long size = 0;
        struct cs_etr_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (buf) {
                /*
                 * In snapshot mode ->data_size holds the new address of the
                 * ring buffer's head.  The size itself is the whole address
                 * range since we want the latest information.
                 */
                if (buf->tmc.snapshot) {
                        size = buf->tmc.nr_pages << PAGE_SHIFT;
                        handle->head = local_xchg(&buf->tmc.data_size, size);
                }

                /*
                 * Tell the tracer PMU how much we got in this run and if
                 * something went wrong along the way.  Nobody else can use
                 * this cs_etr_buffers instance until we are done.  As such
                 * resetting parameters here and squaring off with the ring
                 * buffer API in the tracer PMU is fine.
                 */
                *lost = !!local_xchg(&buf->tmc.lost, 0);
                size = local_xchg(&buf->tmc.data_size, 0);
        }

        /* Get ready for another run */
        drvdata->vaddr = NULL;
        drvdata->paddr = 0;

        return size;
}

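/*
 * tmc_update_etr_buffer - copy captured trace to the perf ring buffer.
 *
 * Stop trace collection, work out how much data the ETR holds (accounting
 * for buffer wrap-around and for a perf buffer smaller than the trace
 * buffer), then drain it one 32-bit word at a time through the RAM read
 * data register into the perf pages.
 */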
static void tmc_update_etr_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle,
                                  void *sink_config)
{
        int i, cur;
        u32 *buf_ptr;
        u32 read_ptr, write_ptr;
        u32 status, to_read;
        unsigned long offset;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
                return;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);

        read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
        write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                local_inc(&buf->lost);
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size).  If so advance the RRP so that we
         * get the latest trace data.
         */
        if (to_read > handle->size) {
                u32 buffer_start, mask = 0;

                /* Read buffer start address in system memory */
                buffer_start = readl_relaxed(drvdata->base + TMC_DBALO);

                /*
                 * The value written to RRP must be byte-address aligned to
                 * the width of the trace memory databus _and_ to a frame
                 * boundary (16 byte), whichever is the biggest. For example,
                 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
                 * LSBs must be 0s. For 256-bit wide trace memory, the five
                 * LSBs must be 0s.
                 */
                switch (drvdata->memwidth) {
                case TMC_MEM_INTF_WIDTH_32BITS:
                case TMC_MEM_INTF_WIDTH_64BITS:
                case TMC_MEM_INTF_WIDTH_128BITS:
                        mask = GENMASK(31, 5);
                        break;
                case TMC_MEM_INTF_WIDTH_256BITS:
                        mask = GENMASK(31, 6);
                        break;
                }

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained above.
                 */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (buffer_start + (drvdata->size - 1)))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
                local_inc(&buf->lost);
        }

        cur = buf->cur;
        offset = buf->offset;

        /* Copy the trace data, one 32-bit word at a time */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode all we have to do is communicate to
         * perf_aux_output_end() the address of the current head.  In full
         * trace mode the same function expects a size to move rb->aux_head
         * forward.
         */
        if (buf->snapshot)
                local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
        else
                local_add(to_read, &buf->data_size);

        CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
        .enable         = tmc_enable_etr_sink,
        .disable        = tmc_disable_etr_sink,
        .alloc_buffer   = tmc_alloc_etr_buffer,
        .free_buffer    = tmc_free_etr_buffer,
        .set_buffer     = tmc_set_etr_buffer,
        .reset_buffer   = tmc_reset_etr_buffer,
        .update_buffer  = tmc_update_etr_buffer,
};

const struct coresight_ops tmc_etr_cs_ops = {
        .sink_ops       = &tmc_etr_sink_ops,
};

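/*
 * tmc_read_prepare_etr - make the trace buffer available for reading.
 *
 * If the ETR is tracing in sysFS mode it is stopped first so that a stable
 * snapshot of the buffer can be handed out; a session driven by perf is
 * never interrupted.
 */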
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
        int ret = 0;
        long val;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        val = local_read(&drvdata->mode);
        /* Don't interfere if operated from Perf */
        if (val == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (val == CS_MODE_SYSFS)
                tmc_etr_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

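/*
 * tmc_read_unprepare_etr - release the trace buffer after a read.
 *
 * If a sysFS session is still active the ETR is simply re-armed with the
 * same buffer; otherwise the DMA area is freed once the spinlock is dropped.
 */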
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
        unsigned long flags;
        dma_addr_t paddr;
        void __iomem *vaddr = NULL;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Re-enable the TMC if need be */
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
                 * so we don't have to explicitly clear it. Also, since the
                 * tracer is still enabled drvdata::buf can't be NULL.
                 */
                tmc_etr_enable_hw(drvdata);
        } else {
                /*
                 * The ETR is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                vaddr = drvdata->vaddr;
                paddr = drvdata->paddr;
                drvdata->buf = drvdata->vaddr = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free allocated memory outside of the spinlock */
        if (vaddr)
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        return 0;
}