blob: 2db48576be5bb50b925c0f872925d948054275ac [file] [log] [blame]
Mathieu Poirier6c6ed1e2016-05-03 11:33:50 -06001/*
2 * Copyright(C) 2016 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Mathieu Poirier62593d12015-12-10 11:36:15 -070018#include <linux/circ_buf.h>
Mathieu Poirier6c6ed1e2016-05-03 11:33:50 -060019#include <linux/coresight.h>
Mathieu Poirierde546192016-05-03 11:33:52 -060020#include <linux/dma-mapping.h>
Mathieu Poirier62593d12015-12-10 11:36:15 -070021#include <linux/slab.h>
22
Mathieu Poirier6c6ed1e2016-05-03 11:33:50 -060023#include "coresight-priv.h"
24#include "coresight-tmc.h"
25
/**
 * struct cs_etr_buffers - keep track of a recording session' specifics
 * @tmc:	generic portion of the TMC buffers
 * @paddr:	the physical address of a DMA'able contiguous memory area
 * @vaddr:	the virtual address associated to @paddr
 * @size:	how much memory we have, starting at @paddr
 * @dev:	the device @vaddr has been tied to
 *
 * One instance is allocated per perf session by tmc_alloc_etr_buffer()
 * and released by tmc_free_etr_buffer().
 *
 * NOTE(review): @vaddr is declared __iomem but is assigned the return of
 * dma_alloc_coherent(), which is a plain CPU pointer — sparse will likely
 * complain; confirm whether the annotation is intentional.
 */
struct cs_etr_buffers {
	struct cs_buffers tmc;
	dma_addr_t paddr;
	void __iomem *vaddr;
	u32 size;
	struct device *dev;
};
41
/*
 * tmc_etr_enable_hw - program and start the ETR.
 *
 * Configures the trace RAM size, AXI control, data buffer address and
 * formatter/flush control, then enables trace capture.  The buffer at
 * drvdata->vaddr/paddr must already be allocated by the caller.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* RSZ is programmed in 32-bit words, hence the division by 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/*
	 * Set up the AXI controls: 16-beat write bursts, scatter-gather
	 * off, and non-secure (PROT_CTL_B1) accesses.  Each intermediate
	 * value is written back; the TMC is still disabled at this point.
	 */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_16;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	/*
	 * Program the buffer base.  Only the low word is used; DBAHI is
	 * cleared, i.e. the buffer is assumed to live below 4GB.
	 */
	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
78
79static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
80{
81 u32 rwp, val;
82
83 rwp = readl_relaxed(drvdata->base + TMC_RWP);
84 val = readl_relaxed(drvdata->base + TMC_STS);
85
Suzuki K Poulose8505fea2016-08-25 15:18:57 -060086 /*
87 * Adjust the buffer to point to the beginning of the trace data
88 * and update the available trace data.
89 */
Suzuki K Poulose1c9cbe12016-08-25 15:18:59 -060090 if (val & TMC_STS_FULL) {
Mathieu Poirier6c6ed1e2016-05-03 11:33:50 -060091 drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
Suzuki K Poulose8505fea2016-08-25 15:18:57 -060092 drvdata->len = drvdata->size;
93 } else {
Mathieu Poirier6c6ed1e2016-05-03 11:33:50 -060094 drvdata->buf = drvdata->vaddr;
Suzuki K Poulose8505fea2016-08-25 15:18:57 -060095 drvdata->len = rwp - drvdata->paddr;
96 }
Mathieu Poirier6c6ed1e2016-05-03 11:33:50 -060097}
98
/*
 * tmc_etr_disable_hw - stop trace collection on the ETR.
 *
 * Flushes and stops the TMC and, when driven from sysFS, snapshots the
 * buffer state (via tmc_etr_dump_hw()) before the block is disabled.
 * Both call sites in this file hold drvdata->spinlock around this.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
114
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR as a sink from sysFS.
 *
 * Allocates the contiguous DMA trace buffer on first use (outside the
 * spinlock, since dma_alloc_coherent() can sleep) and starts the
 * hardware.  Multiple sysFS writers may share an already-enabled sink.
 *
 * Returns 0 on success, -EBUSY if a buffer read is in progress, or
 * -ENOMEM if the trace buffer cannot be allocated.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;		/* did we consume the local allocation? */
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);


	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.  The local allocation (if any) is released after the
	 * spinlock is dropped, because "used" is still false.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.  Another CPU may have raced us and allocated
	 * while the lock was dropped, which this check also covers.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
186
Suzuki K. Poulosed61eabc2016-11-29 09:47:16 -0700187static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
Mathieu Poirierb2176012016-05-03 11:33:56 -0600188{
189 int ret = 0;
Mathieu Poirierb2176012016-05-03 11:33:56 -0600190 unsigned long flags;
191 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
192
Mathieu Poirierb2176012016-05-03 11:33:56 -0600193 spin_lock_irqsave(&drvdata->spinlock, flags);
194 if (drvdata->reading) {
195 ret = -EINVAL;
196 goto out;
197 }
198
Mathieu Poirierb2176012016-05-03 11:33:56 -0600199 /*
200 * In Perf mode there can be only one writer per sink. There
201 * is also no need to continue if the ETR is already operated
202 * from sysFS.
203 */
Suzuki K. Pouloseef8490f2016-11-29 09:47:15 -0700204 if (drvdata->mode != CS_MODE_DISABLED) {
Mathieu Poirierb2176012016-05-03 11:33:56 -0600205 ret = -EINVAL;
206 goto out;
207 }
208
Suzuki K. Pouloseef8490f2016-11-29 09:47:15 -0700209 drvdata->mode = CS_MODE_PERF;
Mathieu Poirierb2176012016-05-03 11:33:56 -0600210 tmc_etr_enable_hw(drvdata);
211out:
212 spin_unlock_irqrestore(&drvdata->spinlock, flags);
213
214 return ret;
215}
216
217static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
218{
219 switch (mode) {
220 case CS_MODE_SYSFS:
Suzuki K. Poulosed61eabc2016-11-29 09:47:16 -0700221 return tmc_enable_etr_sink_sysfs(csdev);
Mathieu Poirierb2176012016-05-03 11:33:56 -0600222 case CS_MODE_PERF:
Suzuki K. Poulosed61eabc2016-11-29 09:47:16 -0700223 return tmc_enable_etr_sink_perf(csdev);
Mathieu Poirierb2176012016-05-03 11:33:56 -0600224 }
225
226 /* We shouldn't be here */
227 return -EINVAL;
228}
229
/*
 * tmc_disable_etr_sink - disable the ETR sink.
 *
 * A no-op while a sysFS read is in progress (the read path owns the
 * hardware state); otherwise stops the TMC if it is running and marks
 * the mode as disabled.
 */
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
251
Mathieu Poirier62593d12015-12-10 11:36:15 -0700252static void *tmc_alloc_etr_buffer(struct coresight_device *csdev, int cpu,
253 void **pages, int nr_pages, bool overwrite)
254{
255 int node;
256 struct cs_etr_buffers *buf;
257 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
258
259 if (cpu == -1)
260 cpu = smp_processor_id();
261 node = cpu_to_node(cpu);
262
263 /* Allocate memory structure for interaction with Perf */
264 buf = kzalloc_node(sizeof(struct cs_etr_buffers), GFP_KERNEL, node);
265 if (!buf)
266 return NULL;
267
268 buf->dev = drvdata->dev;
269 buf->size = drvdata->size;
270 buf->vaddr = dma_alloc_coherent(buf->dev, buf->size,
271 &buf->paddr, GFP_KERNEL);
272 if (!buf->vaddr) {
273 kfree(buf);
274 return NULL;
275 }
276
277 buf->tmc.snapshot = overwrite;
278 buf->tmc.nr_pages = nr_pages;
279 buf->tmc.data_pages = pages;
280
281 return buf;
282}
283
284static void tmc_free_etr_buffer(void *config)
285{
286 struct cs_etr_buffers *buf = config;
287
288 dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->paddr);
289 kfree(buf);
290}
291
292static int tmc_set_etr_buffer(struct coresight_device *csdev,
293 struct perf_output_handle *handle,
294 void *sink_config)
295{
296 int ret = 0;
297 unsigned long head;
298 struct cs_etr_buffers *buf = sink_config;
299 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
300
301 /* wrap head around to the amount of space we have */
302 head = handle->head & ((buf->tmc.nr_pages << PAGE_SHIFT) - 1);
303
304 /* find the page to write to */
305 buf->tmc.cur = head / PAGE_SIZE;
306
307 /* and offset within that page */
308 buf->tmc.offset = head % PAGE_SIZE;
309
310 local_set(&buf->tmc.data_size, 0);
311
312 /* Tell the HW where to put the trace data */
313 drvdata->vaddr = buf->vaddr;
314 drvdata->paddr = buf->paddr;
315 memset(drvdata->vaddr, 0, drvdata->size);
316
317 return ret;
318}
319
/*
 * tmc_reset_etr_buffer - square off with the perf ring buffer at end of run.
 *
 * Returns the number of bytes collected this run (or, in snapshot mode,
 * the full address range) and clears the per-run state so the sink is
 * ready for the next session.  @lost is set when trace data was dropped.
 */
static unsigned long tmc_reset_etr_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config, bool *lost)
{
	long size = 0;
	struct cs_etr_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head. The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->tmc.snapshot) {
			size = buf->tmc.nr_pages << PAGE_SHIFT;
			handle->head = local_xchg(&buf->tmc.data_size, size);
		}

		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way. Nobody else can use
		 * this cs_etr_buffers instance until we are done. As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		*lost = !!local_xchg(&buf->tmc.lost, 0);
		size = local_xchg(&buf->tmc.data_size, 0);
	}

	/* Get ready for another run */
	drvdata->vaddr = NULL;
	drvdata->paddr = 0;

	return size;
}
356
/*
 * tmc_update_etr_buffer - drain trace data from the TMC into perf pages.
 *
 * Stops the TMC, computes how much data is available (accounting for a
 * full/wrapped buffer), advances the read pointer if the perf ring
 * buffer is smaller than the trace RAM so only the newest data is kept,
 * then copies the data out word-by-word through the RRD register.
 * Must only be called while operating in CS_MODE_PERF.
 */
static void tmc_update_etr_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 buffer_start, mask = 0;

		/* Read buffer start address in system memory */
		buffer_start = readl_relaxed(drvdata->base + TMC_DBALO);

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (buffer_start + (drvdata->size - 1)))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		/* Data was dropped to fit the perf ring buffer */
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head. In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
470
/* Sink callbacks registered with the coresight core for ETR devices */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
	.alloc_buffer	= tmc_alloc_etr_buffer,
	.free_buffer	= tmc_free_etr_buffer,
	.set_buffer	= tmc_set_etr_buffer,
	.reset_buffer	= tmc_reset_etr_buffer,
	.update_buffer	= tmc_update_etr_buffer,
};

/* Exported operations table tying the ETR configuration to the core */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
Mathieu Poirier45254122016-05-03 11:33:51 -0600484
485int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
486{
Mathieu Poirierde546192016-05-03 11:33:52 -0600487 int ret = 0;
Mathieu Poirier45254122016-05-03 11:33:51 -0600488 unsigned long flags;
489
490 /* config types are set a boot time and never change */
491 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
492 return -EINVAL;
493
494 spin_lock_irqsave(&drvdata->spinlock, flags);
Mathieu Poirierf74debb2016-05-03 11:33:53 -0600495 if (drvdata->reading) {
496 ret = -EBUSY;
497 goto out;
498 }
Mathieu Poirier45254122016-05-03 11:33:51 -0600499
Mathieu Poirierb2176012016-05-03 11:33:56 -0600500 /* Don't interfere if operated from Perf */
Suzuki K. Pouloseef8490f2016-11-29 09:47:15 -0700501 if (drvdata->mode == CS_MODE_PERF) {
Mathieu Poirierb2176012016-05-03 11:33:56 -0600502 ret = -EINVAL;
503 goto out;
504 }
505
Mathieu Poirierde546192016-05-03 11:33:52 -0600506 /* If drvdata::buf is NULL the trace data has been read already */
507 if (drvdata->buf == NULL) {
508 ret = -EINVAL;
509 goto out;
510 }
511
Mathieu Poirier45254122016-05-03 11:33:51 -0600512 /* Disable the TMC if need be */
Suzuki K. Pouloseef8490f2016-11-29 09:47:15 -0700513 if (drvdata->mode == CS_MODE_SYSFS)
Mathieu Poirier45254122016-05-03 11:33:51 -0600514 tmc_etr_disable_hw(drvdata);
515
516 drvdata->reading = true;
Mathieu Poirierde546192016-05-03 11:33:52 -0600517out:
Mathieu Poirier45254122016-05-03 11:33:51 -0600518 spin_unlock_irqrestore(&drvdata->spinlock, flags);
519
Mathieu Poirierb2176012016-05-03 11:33:56 -0600520 return ret;
Mathieu Poirier45254122016-05-03 11:33:51 -0600521}
522
/*
 * tmc_read_unprepare_etr - undo what tmc_read_prepare_etr() did.
 *
 * If the sink was tracing from sysFS, restart it on the same buffer;
 * otherwise the trace buffer has been fully consumed and is released.
 * The DMA area is freed outside the spinlock since dma_free_coherent()
 * must not be called in atomic context.  Always returns 0 for an ETR.
 */
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* RE-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
		 * so we don't have to explicitly clear it. Also, since the
		 * tracer is still enabled drvdata::buf can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory out side of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}