blob: 1ff4468589637164c7107e790ceb4586747c9578 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * TSIF Driver
3 *
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07004 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
Steve Mucklef132c6c2012-06-06 18:30:57 -070034#include <linux/gpio.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <mach/dma.h>
36#include <mach/msm_tsif.h>
37
/*
 * TSIF register offsets, relative to the device's MMIO base
 */
#define TSIF_STS_CTL_OFF           (0x0)   /* status/control */
#define TSIF_TIME_LIMIT_OFF        (0x4)   /* packet timeout limit */
#define TSIF_CLK_REF_OFF           (0x8)
#define TSIF_LPBK_FLAGS_OFF        (0xc)   /* loopback control */
#define TSIF_LPBK_DATA_OFF         (0x10)
#define TSIF_TEST_CTL_OFF          (0x14)
#define TSIF_TEST_MODE_OFF         (0x18)
#define TSIF_TEST_RESET_OFF        (0x1c)
#define TSIF_TEST_EXPORT_OFF       (0x20)
#define TSIF_TEST_CURRENT_OFF      (0x24)

#define TSIF_DATA_PORT_OFF         (0x100) /* FIFO read port; DM source */

/* bits for TSIF_STS_CTL register */
#define TSIF_STS_CTL_EN_IRQ       (1 << 28)
#define TSIF_STS_CTL_PACK_AVAIL   (1 << 27)
#define TSIF_STS_CTL_1ST_PACKET   (1 << 26)
#define TSIF_STS_CTL_OVERFLOW     (1 << 25)
#define TSIF_STS_CTL_LOST_SYNC    (1 << 24)
#define TSIF_STS_CTL_TIMEOUT      (1 << 23)
#define TSIF_STS_CTL_INV_SYNC     (1 << 21)
#define TSIF_STS_CTL_INV_NULL     (1 << 20)
#define TSIF_STS_CTL_INV_ERROR    (1 << 19)
#define TSIF_STS_CTL_INV_ENABLE   (1 << 18)
#define TSIF_STS_CTL_INV_DATA     (1 << 17)
#define TSIF_STS_CTL_INV_CLOCK    (1 << 16)
#define TSIF_STS_CTL_SPARE        (1 << 15)
#define TSIF_STS_CTL_EN_NULL      (1 << 11)
#define TSIF_STS_CTL_EN_ERROR     (1 << 10)
#define TSIF_STS_CTL_LAST_BIT     (1 <<  9)
#define TSIF_STS_CTL_EN_TIME_LIM  (1 <<  8)
#define TSIF_STS_CTL_EN_TCR       (1 <<  7)
#define TSIF_STS_CTL_TEST_MODE    (3 <<  5) /* 2-bit field, see tsif_start_hw() */
#define TSIF_STS_CTL_EN_DM        (1 <<  4)
#define TSIF_STS_CTL_STOP         (1 <<  3)
#define TSIF_STS_CTL_START        (1 <<  0)

/*
 * Data buffering parameters
 *
 * Data stored in cyclic buffer;
 *
 * Data organized in chunks of packets.
 * One chunk processed at a time by the data mover
 *
 * Note: TSIF_PKTS_IN_CHUNK / TSIF_CHUNKS_IN_BUF read the per-device
 * configuration, so macros below them are only usable where a local
 * `tsif_device` pointer is in scope.
 */
#define TSIF_PKTS_IN_CHUNK_DEFAULT  (16)  /**< packets in one DM chunk */
#define TSIF_CHUNKS_IN_BUF_DEFAULT   (8)
#define TSIF_PKTS_IN_CHUNK        (tsif_device->pkts_per_chunk)
#define TSIF_CHUNKS_IN_BUF        (tsif_device->chunks_per_buf)
#define TSIF_PKTS_IN_BUF          (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
#define TSIF_BUF_SIZE             (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
#define TSIF_MAX_ID 1

/* clock-controller registers, dumped via show_stats() for debugging */
#define ROW_RESET                 (MSM_CLK_CTL_BASE + 0x214)
#define GLBL_CLK_ENA              (MSM_CLK_CTL_BASE + 0x000)
#define CLK_HALT_STATEB           (MSM_CLK_CTL_BASE + 0x104)
#define TSIF_NS_REG               (MSM_CLK_CTL_BASE + 0x0b4)
#define TV_NS_REG                 (MSM_CLK_CTL_BASE + 0x0bc)
100
101/* used to create debugfs entries */
/*
 * Table of TSIF registers exposed as debugfs files; one entry per file.
 * Iterated at probe time to create a debugfs node per register.
 */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* file permissions */
	int offset;		/* register offset from MMIO base */
} debugfs_tsif_regs[] = {
	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},  /* write-only */
	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF}, /* read-only */
	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},    /* owner read-only */
};
119
120/* structures for Data Mover */
/* structures for Data Mover */

/* One DM command: a box-mode descriptor plus the pointer-list entry
 * referencing it; allocated together in one coherent DMA allocation. */
struct tsif_dmov_cmd {
	dmov_box box;		/* box-mode transfer descriptor */
	dma_addr_t box_ptr;	/* CMD_PTR_LP entry pointing at @box */
};

struct msm_tsif_device;

/* Per-transfer context recovered in the DM completion callback via
 * container_of() on @hdr. */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;		/* DM command header (callback, cmdptr) */
	struct msm_tsif_device *tsif_device;	/* owning device */
	int busy;				/* xfer currently queued on DM */
	int wi; /**< set devices's write index after xfer */
};
134
/*
 * Per-device state for one TSIF instance.
 * Ring-buffer indices are in packet units; see the "DMA theory of
 * operation" comment below for the ri/wi/dmwi invariants.
 */
struct msm_tsif_device {
	struct list_head devlist;	/* link in the global device list */
	struct platform_device *pdev;
	struct resource *memres;	/* MMIO region resource */
	void __iomem *base;		/* mapped MMIO base */
	unsigned int irq;
	int mode;			/* 1, 2, or 3 (manual); see tsif_start_hw() */
	u32 time_limit;			/* TSIF_TIME_LIMIT register value */
	enum tsif_state state;
	struct wake_lock wake_lock;
	/* clocks; any of these may be NULL if not named in platform data */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;	/* per-device debugfs directory */
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf; /* exposes data_buffer */
	/* DMA related */
	int dma;			/* DM channel number */
	int crci;			/* CRCI flow-control id */
	void *data_buffer;		/* coherent cyclic packet buffer */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;
	u32 chunks_per_buf;
	int ri;				/* read index (client side) */
	int wi;				/* write index (completed DM xfers) */
	int dmwi; /**< DataMover write index */
	struct tsif_dmov_cmd *dmov_cmd[2];	/* double-buffered DM commands */
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* reschedules DM outside IRQ */
	struct tasklet_struct clocks_off;	/* turns clocks off outside IRQ */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* status words of first/last packet in chunk */
	/* client */
	void *client_data;
	void (*client_notify)(void *client_data);
};
185
186/* ===clocks begin=== */
187
188static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
189{
190 if (tsif_device->tsif_clk) {
191 clk_put(tsif_device->tsif_clk);
192 tsif_device->tsif_clk = NULL;
193 }
194 if (tsif_device->tsif_pclk) {
195 clk_put(tsif_device->tsif_pclk);
196 tsif_device->tsif_pclk = NULL;
197 }
198
199 if (tsif_device->tsif_ref_clk) {
200 clk_put(tsif_device->tsif_ref_clk);
201 tsif_device->tsif_ref_clk = NULL;
202 }
203}
204
205static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
206{
207 struct msm_tsif_platform_data *pdata =
208 tsif_device->pdev->dev.platform_data;
209 int rc = 0;
210
211 if (pdata->tsif_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700212 tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
213 pdata->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214 if (IS_ERR(tsif_device->tsif_clk)) {
215 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
216 pdata->tsif_clk);
217 rc = PTR_ERR(tsif_device->tsif_clk);
218 tsif_device->tsif_clk = NULL;
219 goto ret;
220 }
221 }
222 if (pdata->tsif_pclk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700223 tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
224 pdata->tsif_pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225 if (IS_ERR(tsif_device->tsif_pclk)) {
226 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
227 pdata->tsif_pclk);
228 rc = PTR_ERR(tsif_device->tsif_pclk);
229 tsif_device->tsif_pclk = NULL;
230 goto ret;
231 }
232 }
233 if (pdata->tsif_ref_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700234 tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
235 pdata->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 if (IS_ERR(tsif_device->tsif_ref_clk)) {
237 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
238 pdata->tsif_ref_clk);
239 rc = PTR_ERR(tsif_device->tsif_ref_clk);
240 tsif_device->tsif_ref_clk = NULL;
241 goto ret;
242 }
243 }
244 return 0;
245ret:
246 tsif_put_clocks(tsif_device);
247 return rc;
248}
249
250static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
251{
252 if (on) {
253 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300254 clk_prepare_enable(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300256 clk_prepare_enable(tsif_device->tsif_pclk);
257 clk_prepare_enable(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258 } else {
259 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300260 clk_disable_unprepare(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700261 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300262 clk_disable_unprepare(tsif_device->tsif_pclk);
263 clk_disable_unprepare(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700264 }
265}
Joel Nider6682b382012-07-03 13:59:27 +0300266
/*
 * Tasklet body for tsif_device->clocks_off: turns the clocks off
 * outside interrupt context (clk calls may not be IRQ-safe; scheduled
 * from the DM completion callback on DMA error).
 */
static void tsif_clocks_off(unsigned long data)
{
	struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
	tsif_clock(tsif_device, 0);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272/* ===clocks end=== */
273/* ===gpio begin=== */
274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275static int tsif_gpios_disable(const struct msm_gpio *table, int size)
276{
277 int rc = 0;
278 int i;
279 const struct msm_gpio *g;
280 for (i = size-1; i >= 0; i--) {
281 int tmp;
282 g = table + i;
Joel Nider951b2832012-05-07 21:13:38 +0300283 tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
284 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
285 GPIO_CFG_DISABLE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286 if (tmp) {
287 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
288 " <%s> failed: %d\n",
289 g->gpio_cfg, g->label ?: "?", rc);
290 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
291 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
292 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
293 GPIO_DRVSTR(g->gpio_cfg));
294 if (!rc)
295 rc = tmp;
296 }
297 }
298
299 return rc;
300}
301
302static int tsif_gpios_enable(const struct msm_gpio *table, int size)
303{
304 int rc;
305 int i;
306 const struct msm_gpio *g;
307 for (i = 0; i < size; i++) {
308 g = table + i;
309 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
310 if (rc) {
311 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
312 " <%s> failed: %d\n",
313 g->gpio_cfg, g->label ?: "?", rc);
314 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
315 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
316 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
317 GPIO_DRVSTR(g->gpio_cfg));
318 goto err;
319 }
320 }
321 return 0;
322err:
323 tsif_gpios_disable(table, i);
324 return rc;
325}
326
/*
 * Request-and-enable wrapper; currently only enables (no gpio_request
 * step is performed here).  Kept for symmetry with
 * tsif_gpios_disable_free().
 */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	return tsif_gpios_enable(table, size);
}
333
/*
 * Disable-and-free wrapper; currently only disables (nothing was
 * requested, so nothing is freed).  Errors are intentionally ignored:
 * this runs on the teardown path.
 */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	(void) tsif_gpios_disable(table, size);
}
338
339static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
340{
341 struct msm_tsif_platform_data *pdata =
342 tsif_device->pdev->dev.platform_data;
343 return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
344}
345
346static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
347{
348 struct msm_tsif_platform_data *pdata =
349 tsif_device->pdev->dev.platform_data;
350 tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
351}
352
353/* ===gpio end=== */
354
355static int tsif_start_hw(struct msm_tsif_device *tsif_device)
356{
357 u32 ctl = TSIF_STS_CTL_EN_IRQ |
358 TSIF_STS_CTL_EN_TIME_LIM |
359 TSIF_STS_CTL_EN_TCR |
360 TSIF_STS_CTL_EN_DM;
361 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
362 switch (tsif_device->mode) {
363 case 1: /* mode 1 */
364 ctl |= (0 << 5);
365 break;
366 case 2: /* mode 2 */
367 ctl |= (1 << 5);
368 break;
369 case 3: /* manual - control from debugfs */
370 return 0;
371 break;
372 default:
373 return -EINVAL;
374 }
375 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
376 iowrite32(tsif_device->time_limit,
377 tsif_device->base + TSIF_TIME_LIMIT_OFF);
378 wmb();
379 iowrite32(ctl | TSIF_STS_CTL_START,
380 tsif_device->base + TSIF_STS_CTL_OFF);
381 wmb();
382 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
383 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
384}
385
/* Stop the TSIF core by writing the STOP bit to STS_CTL. */
static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
{
	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb(); /* make sure the stop reaches the device */
}
391
392/* ===DMA begin=== */
393/**
394 * TSIF DMA theory of operation
395 *
396 * Circular memory buffer \a tsif_mem_buffer allocated;
397 * 4 pointers points to and moved forward on:
398 * - \a ri index of first ready to read packet.
399 * Updated by client's call to tsif_reclaim_packets()
400 * - \a wi points to the next packet to be written by DM.
 *      Data below is valid and will not be overridden by DMA.
402 * Moved on DM callback
403 * - \a dmwi points to the next packet not scheduled yet for DM
404 * moved when packet scheduled for DM
405 *
406 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
407 * at time immediately after scheduling.
408 *
409 * Initially, 2 packets get scheduled for the DM.
410 *
411 * Upon packet receive, DM writes packet to the pre-programmed
412 * location and invoke its callback.
413 *
 * DM callback sets the wi pointer to \a xfer->wi;
415 * then it schedules next packet for DM and moves \a dmwi pointer.
416 *
417 * Buffer overflow handling
418 *
419 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
420 * DMA re-scheduled to the same index.
421 * Callback check and not move \a wi to become equal to \a ri
422 *
423 * On \a read request, data between \a ri and \a wi pointers may be read;
424 * \ri pointer moved accordingly.
425 *
426 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
427 * \a wi is between [\a ri, \a dmwi]
428 *
429 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
430 *
431 * Number of scheduled packets for DM: (dmwi-wi)
432 */
433
434/**
435 * tsif_dma_schedule - schedule DMA transfers
436 *
437 * @tsif_device: device
438 *
439 * Executed from process context on init, or from tasklet when
440 * re-scheduling upon DMA completion.
441 * This prevent concurrent execution from several CPU's
442 */
/**
 * tsif_dma_schedule - schedule DMA transfers
 *
 * @tsif_device: device
 *
 * Executed from process context on init, or from tasklet when
 * re-scheduling upon DMA completion.
 * This prevent concurrent execution from several CPU's
 *
 * For each idle xfer slot: point its DM box at the current dmwi
 * position, advance dmwi by one chunk (unless that would collide with
 * ri — the overflow case, where the same position is reused and the
 * incoming chunk will later be soft-dropped), and enqueue the command.
 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* destination: packet slot @dmwi0 in the cyclic buffer */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* wi the device should advance to once this xfer completes */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
486
487/**
488 * tsif_dmov_complete_func - DataMover completion callback
489 *
490 * @cmd: original DM command
491 * @result: DM result
492 * @err: optional error buffer
493 *
494 * Executed in IRQ context (Data Mover's IRQ)
495 * DataMover's spinlock @msm_dmov_lock held.
496 */
/**
 * tsif_dmov_complete_func - DataMover completion callback
 *
 * @cmd: original DM command
 * @result: DM result
 * @err: optional error buffer
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 *
 * On success: advance wi, count soft drops, and (while running) arm
 * the refill tasklet.  On error/flush: invalidate the chunk's status
 * words and, for hard DMA errors, stop the hardware and defer the
 * clock shutdown to the clocks_off tasklet (can't be done in IRQ
 * context).
 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;
	/* byte offset of this chunk within the cyclic data buffer */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				  TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * This branch is taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happen for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 * Clocks are turned off from outside the
				 * interrupt context.
				 */
				tasklet_schedule(&tsif_device->clocks_off);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
624
625/**
626 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
627 *
628 * @data: tsif_device
629 *
630 * Reschedule DMA requests
631 *
632 * Executed in tasklet
633 */
634static void tsif_dma_refill(unsigned long data)
635{
636 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
637 if (tsif_device->state == tsif_state_running)
638 tsif_dma_schedule(tsif_device);
639}
640
641/**
642 * tsif_dma_flush - flush DMA channel
643 *
644 * @tsif_device:
645 *
646 * busy wait till DMA flushed
647 */
/**
 * tsif_dma_flush - flush DMA channel
 *
 * @tsif_device:
 *
 * busy wait till DMA flushed
 *
 * Repeatedly requests a flush and sleeps 10 ms until both xfer slots
 * are released by the completion callback, then marks the device
 * stopped and notifies the client.  Must be called from a context that
 * may sleep.
 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			msm_dmov_flush(tsif_device->dma, 1);
			usleep(10000);
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
662
/*
 * Tear down all DMA state: kill the refill tasklet, flush outstanding
 * DM commands, then free the command descriptors and the data buffer.
 * Safe to call on a partially-initialized device (NULL checks).
 */
static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
{
	int i;
	tsif_device->state = tsif_state_flushing;
	tasklet_kill(&tsif_device->dma_refill);
	tsif_dma_flush(tsif_device); /* waits for both xfer slots to drain */
	for (i = 0; i < 2; i++) {
		if (tsif_device->dmov_cmd[i]) {
			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
					  tsif_device->dmov_cmd[i],
					  tsif_device->dmov_cmd_dma[i]);
			tsif_device->dmov_cmd[i] = NULL;
		}
	}
	if (tsif_device->data_buffer) {
		/* detach the debugfs blob before freeing its backing store */
		tsif_device->blob_wrapper_databuf.data = NULL;
		tsif_device->blob_wrapper_databuf.size = 0;
		dma_free_coherent(NULL, TSIF_BUF_SIZE,
				  tsif_device->data_buffer,
				  tsif_device->data_buffer_dma);
		tsif_device->data_buffer = NULL;
	}
}
686
/*
 * Allocate the cyclic data buffer and the two DM command descriptors,
 * reset the ring indices, and pre-program each DM box command (source:
 * the TSIF data port via CRCI flow control; destination row address is
 * filled in per-chunk by tsif_dma_schedule()).
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partial
 * allocations are released via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	/* expose the raw buffer through the debugfs blob */
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
					sizeof(struct tsif_dmov_cmd),
					&tsif_device->dmov_cmd_dma[i],
					GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* source does not advance (FIFO port); dest steps by packet */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	msm_dmov_flush(tsif_device->dma, 1); /* start from a clean channel */
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
741
742/* ===DMA end=== */
743
744/* ===IRQ begin=== */
745
/*
 * TSIF interrupt handler: reads STS_CTL, counts the reported events,
 * and acknowledges them by writing the status word back.  Returns
 * IRQ_NONE if none of the known event bits are set.
 */
static irqreturn_t tsif_irq(int irq, void *dev_id)
{
	struct msm_tsif_device *tsif_device = dev_id;
	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
			 TSIF_STS_CTL_OVERFLOW |
			 TSIF_STS_CTL_LOST_SYNC |
			 TSIF_STS_CTL_TIMEOUT))) {
		dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
		return IRQ_NONE;
	}
	if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
		tsif_device->stat_rx++;
	}
	if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
		tsif_device->stat_overflow++;
	}
	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
		tsif_device->stat_lost_sync++;
	}
	if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
		tsif_device->stat_timeout++;
	}
	/* write the status back to acknowledge the handled events */
	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb();
	return IRQ_HANDLED;
}
777
778/* ===IRQ end=== */
779
780/* ===Device attributes begin=== */
781
/*
 * sysfs "stats" read: one-page dump of device configuration, state,
 * counters, and a few clock-controller registers for debugging.
 * Format-string lines and argument order are strictly paired.
 */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device       %s\n"
			"Mode       = %d\n"
			"Time limit = %d\n"
			"State        %s\n"
			"Client     = %p\n"
			"Pkt/Buf    = %d\n"
			"Pkt/chunk  = %d\n"
			"--statistics--\n"
			"Rx chunks  = %d\n"
			"Overflow   = %d\n"
			"Lost sync  = %d\n"
			"Timeout    = %d\n"
			"DMA error  = %d\n"
			"Soft drop  = %d\n"
			"IFI        = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA    = 0x%08x\n"
			"ROW_RESET       = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG       = 0x%08x\n"
			"TSIF_NS_REG     = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
847/**
848 * set_stats - reset statistics on write
849 *
850 * @dev:
851 * @attr:
852 * @buf:
853 * @count:
854 */
855static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
856 const char *buf, size_t count)
857{
858 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
859 tsif_device->stat_rx = 0;
860 tsif_device->stat_overflow = 0;
861 tsif_device->stat_lost_sync = 0;
862 tsif_device->stat_timeout = 0;
863 tsif_device->stat_dmov_err = 0;
864 tsif_device->stat_soft_drop = 0;
865 tsif_device->stat_ifi = 0;
866 return count;
867}
868static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
869
870static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
871 char *buf)
872{
873 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
874 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
875}
876
877static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
878 const char *buf, size_t count)
879{
880 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
881 int value;
882 int rc;
883 if (1 != sscanf(buf, "%d", &value)) {
884 dev_err(&tsif_device->pdev->dev,
885 "Failed to parse integer: <%s>\n", buf);
886 return -EINVAL;
887 }
888 rc = tsif_set_mode(tsif_device, value);
889 if (!rc)
890 rc = count;
891 return rc;
892}
893static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
894
895static ssize_t show_time_limit(struct device *dev,
896 struct device_attribute *attr,
897 char *buf)
898{
899 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
900 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
901}
902
903static ssize_t set_time_limit(struct device *dev,
904 struct device_attribute *attr,
905 const char *buf, size_t count)
906{
907 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
908 int value;
909 int rc;
910 if (1 != sscanf(buf, "%d", &value)) {
911 dev_err(&tsif_device->pdev->dev,
912 "Failed to parse integer: <%s>\n", buf);
913 return -EINVAL;
914 }
915 rc = tsif_set_time_limit(tsif_device, value);
916 if (!rc)
917 rc = count;
918 return rc;
919}
920static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
921 show_time_limit, set_time_limit);
922
923static ssize_t show_buf_config(struct device *dev,
924 struct device_attribute *attr,
925 char *buf)
926{
927 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
928 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
929 tsif_device->pkts_per_chunk,
930 tsif_device->chunks_per_buf);
931}
932
933static ssize_t set_buf_config(struct device *dev,
934 struct device_attribute *attr,
935 const char *buf, size_t count)
936{
937 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
938 u32 p, c;
939 int rc;
940 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
941 dev_err(&tsif_device->pdev->dev,
942 "Failed to parse integer: <%s>\n", buf);
943 return -EINVAL;
944 }
945 rc = tsif_set_buf_config(tsif_device, p, c);
946 if (!rc)
947 rc = count;
948 return rc;
949}
950static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
951 show_buf_config, set_buf_config);
952
/* sysfs attributes registered as one group on the device. */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	NULL,	/* sentinel */
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
963/* ===Device attributes end=== */
964
965/* ===debugfs begin=== */
966
/* debugfs accessor: write a 32-bit value to the iomem address in @data. */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb(); /* make sure the write reaches the device */
	return 0;
}

/* debugfs accessor: read a 32-bit value from the iomem address in @data. */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}

/* fops presenting a 32-bit register as hex through debugfs */
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

/*
 * Create a debugfs file exposing one 32-bit MMIO register.
 * @value is the register's mapped (iomem) address.
 */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}
988
/*
 * action_open - bring the TSIF device from "stopped" to "running".
 *
 * Ordering is significant: DMA must be initialized and scheduled before
 * the TSIF core is started (see comment below), and each failure path
 * unwinds exactly the steps taken so far, in reverse order.
 *
 * Returns 0 on success, -EAGAIN if the device is not currently stopped,
 * or the error from DMA init / HW start / GPIO setup / runtime PM.
 */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;
	int result;

	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;

	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	tsif_device->state = tsif_state_running;

	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		/*
		 * NOTE(review): ->state stays tsif_state_running on the
		 * failure paths below unless tsif_dma_exit() resets it —
		 * confirm against its definition.
		 */
		/* unwind: DMA, clock, IRQ */
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return rc;
	}

	/* make sure the GPIO's are set up */
	rc = tsif_start_gpios(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
		/* unwind: HW, DMA, clock, IRQ */
		tsif_stop_hw(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return rc;
	}

	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		/* unwind: GPIOs, HW, DMA, clock, IRQ */
		tsif_stop_gpios(tsif_device);
		tsif_stop_hw(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return result;
	}

	/* keep the system awake while streaming is active */
	wake_lock(&tsif_device->wake_lock);
	return 0;
}
1055
/*
 * action_close - stop a running TSIF device and release its resources.
 *
 * Mirror image of action_open(): GPIOs are shut first so no new data
 * enters, then (after a mandatory settling delay, see below) the core,
 * DMA, clock and IRQ are torn down. Always returns 0.
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);

	/* turn off the GPIO's to prevent new data from entering */
	tsif_stop_gpios(tsif_device);

	/* we unfortunately must sleep here to give the ADM time to
	 * complete any outstanding reads after the GPIO's are turned
	 * off. There is no indication from the ADM hardware that
	 * there are any outstanding reads on the bus, and if we
	 * stop the TSIF too quickly, it can cause a bus error.
	 */
	msleep(250);

	/* now we can stop the core */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	/* balance the pm_runtime_get()/wake_lock() taken in action_open() */
	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1082
1083
/*
 * Commands accepted by the debugfs "action" file; matched by name prefix
 * in tsif_debugfs_action_write().
 */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1091
1092static ssize_t tsif_debugfs_action_write(struct file *filp,
1093 const char __user *userbuf,
1094 size_t count, loff_t *f_pos)
1095{
1096 int i;
1097 struct msm_tsif_device *tsif_device = filp->private_data;
1098 char s[40];
1099 int len = min(sizeof(s) - 1, count);
1100 if (copy_from_user(s, userbuf, len))
1101 return -EFAULT;
1102 s[len] = '\0';
1103 dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
1104 for (i = 0; i < ARRAY_SIZE(actions); i++) {
1105 if (!strncmp(s, actions[i].name,
1106 min(count, strlen(actions[i].name)))) {
1107 int rc = actions[i].func(tsif_device);
1108 if (!rc)
1109 rc = count;
1110 return rc;
1111 }
1112 }
1113 return -EINVAL;
1114}
1115
/* Generic debugfs open: stash the inode's private pointer (the
 * msm_tsif_device set at file creation) into filp->private_data. */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
1121
/* debugfs "action" file: write-only command interface ("open"/"close") */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1126
1127static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1128 size_t count, loff_t *f_pos)
1129{
1130 static char bufa[200];
1131 static char *buf = bufa;
1132 int sz = sizeof(bufa);
1133 struct msm_tsif_device *tsif_device = filp->private_data;
1134 int len = 0;
1135 if (tsif_device) {
1136 int i;
1137 len += snprintf(buf + len, sz - len,
1138 "ri %3d | wi %3d | dmwi %3d |",
1139 tsif_device->ri, tsif_device->wi,
1140 tsif_device->dmwi);
1141 for (i = 0; i < 2; i++) {
1142 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1143 if (xfer->busy) {
1144 u32 dst =
1145 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1146 u32 base = tsif_device->data_buffer_dma;
1147 int w = (dst - base) / TSIF_PKT_SIZE;
1148 len += snprintf(buf + len, sz - len,
1149 " [%3d]{%3d}",
1150 w, xfer->wi);
1151 } else {
1152 len += snprintf(buf + len, sz - len,
1153 " ---idle---");
1154 }
1155 }
1156 len += snprintf(buf + len, sz - len, "\n");
1157 } else {
1158 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1159 }
1160 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1161}
1162
/* debugfs "dma" file: read-only snapshot of DMA indexes and transfers */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1167
1168static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1169 size_t count, loff_t *f_pos)
1170{
1171 static char bufa[300];
1172 static char *buf = bufa;
1173 int sz = sizeof(bufa);
1174 struct msm_tsif_device *tsif_device = filp->private_data;
1175 int len = 0;
1176 if (tsif_device) {
1177 struct msm_tsif_platform_data *pdata =
1178 tsif_device->pdev->dev.platform_data;
1179 int i;
1180 for (i = 0; i < pdata->num_gpios; i++) {
1181 if (pdata->gpios[i].gpio_cfg) {
1182 int x = !!gpio_get_value(GPIO_PIN(
1183 pdata->gpios[i].gpio_cfg));
1184 len += snprintf(buf + len, sz - len,
1185 "%15s: %d\n",
1186 pdata->gpios[i].label, x);
1187 }
1188 }
1189 } else {
1190 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1191 }
1192 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1193}
1194
/* debugfs "gpios" file: read-only dump of current GPIO pin values */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1199
1200
1201static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
1202{
1203 tsif_device->dent_tsif = debugfs_create_dir(
1204 dev_name(&tsif_device->pdev->dev), NULL);
1205 if (tsif_device->dent_tsif) {
1206 int i;
1207 void __iomem *base = tsif_device->base;
1208 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
1209 tsif_device->debugfs_tsif_regs[i] =
1210 debugfs_create_iomem_x32(
1211 debugfs_tsif_regs[i].name,
1212 debugfs_tsif_regs[i].mode,
1213 tsif_device->dent_tsif,
1214 base + debugfs_tsif_regs[i].offset);
1215 }
1216 tsif_device->debugfs_gpio = debugfs_create_file("gpios",
1217 S_IRUGO,
1218 tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
1219 tsif_device->debugfs_action = debugfs_create_file("action",
1220 S_IWUSR,
1221 tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
1222 tsif_device->debugfs_dma = debugfs_create_file("dma",
1223 S_IRUGO,
1224 tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
1225 tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
1226 S_IRUGO,
1227 tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
1228 }
1229}
1230
1231static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1232{
1233 if (tsif_device->dent_tsif) {
1234 int i;
1235 debugfs_remove_recursive(tsif_device->dent_tsif);
1236 tsif_device->dent_tsif = NULL;
1237 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1238 tsif_device->debugfs_tsif_regs[i] = NULL;
1239 tsif_device->debugfs_gpio = NULL;
1240 tsif_device->debugfs_action = NULL;
1241 tsif_device->debugfs_dma = NULL;
1242 tsif_device->debugfs_databuf = NULL;
1243 }
1244}
1245/* ===debugfs end=== */
1246
1247/* ===module begin=== */
1248static LIST_HEAD(tsif_devices);
1249
1250static struct msm_tsif_device *tsif_find_by_id(int id)
1251{
1252 struct msm_tsif_device *tsif_device;
1253 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1254 if (tsif_device->pdev->id == id)
1255 return tsif_device;
1256 }
1257 return NULL;
1258}
1259
1260static int __devinit msm_tsif_probe(struct platform_device *pdev)
1261{
1262 int rc = -ENODEV;
1263 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1264 struct msm_tsif_device *tsif_device;
1265 struct resource *res;
1266 /* check device validity */
1267 /* must have platform data */
1268 if (!plat) {
1269 dev_err(&pdev->dev, "Platform data not available\n");
1270 rc = -EINVAL;
1271 goto out;
1272 }
Joel Nider5578bdb2011-08-12 09:37:11 +03001273
1274 if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001275 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1276 rc = -EINVAL;
1277 goto out;
1278 }
1279 /* OK, we will use this device */
1280 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1281 if (!tsif_device) {
1282 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1283 rc = -ENOMEM;
1284 goto out;
1285 }
1286 /* cross links */
1287 tsif_device->pdev = pdev;
1288 platform_set_drvdata(pdev, tsif_device);
1289 tsif_device->mode = 1;
1290 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1291 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1292 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1293 (unsigned long)tsif_device);
Joel Nider6682b382012-07-03 13:59:27 +03001294 tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
1295 (unsigned long)tsif_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001296 if (tsif_get_clocks(tsif_device))
1297 goto err_clocks;
1298/* map I/O memory */
1299 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1300 if (!tsif_device->memres) {
1301 dev_err(&pdev->dev, "Missing MEM resource\n");
1302 rc = -ENXIO;
1303 goto err_rgn;
1304 }
1305 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1306 if (!res) {
1307 dev_err(&pdev->dev, "Missing DMA resource\n");
1308 rc = -ENXIO;
1309 goto err_rgn;
1310 }
1311 tsif_device->dma = res->start;
1312 tsif_device->crci = res->end;
1313 tsif_device->base = ioremap(tsif_device->memres->start,
1314 resource_size(tsif_device->memres));
1315 if (!tsif_device->base) {
1316 dev_err(&pdev->dev, "ioremap failed\n");
1317 goto err_ioremap;
1318 }
1319 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1320 tsif_device->memres->start, tsif_device->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001321
1322 pm_runtime_set_active(&pdev->dev);
1323 pm_runtime_enable(&pdev->dev);
1324
1325 tsif_debugfs_init(tsif_device);
1326 rc = platform_get_irq(pdev, 0);
1327 if (rc > 0) {
1328 tsif_device->irq = rc;
1329 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1330 dev_name(&pdev->dev), tsif_device);
1331 disable_irq(tsif_device->irq);
1332 }
1333 if (rc) {
1334 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1335 tsif_device->irq, rc);
1336 goto err_irq;
1337 }
1338 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1339 if (rc) {
1340 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1341 goto err_attrs;
1342 }
1343 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1344 dev_name(&pdev->dev));
1345 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1346 tsif_device->irq, tsif_device->memres->start,
1347 tsif_device->dma, tsif_device->crci);
1348 list_add(&tsif_device->devlist, &tsif_devices);
1349 return 0;
1350/* error path */
1351 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1352err_attrs:
1353 free_irq(tsif_device->irq, tsif_device);
1354err_irq:
1355 tsif_debugfs_exit(tsif_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001356 iounmap(tsif_device->base);
1357err_ioremap:
1358err_rgn:
1359 tsif_put_clocks(tsif_device);
1360err_clocks:
1361 kfree(tsif_device);
1362out:
1363 return rc;
1364}
1365
/*
 * msm_tsif_remove - platform driver remove callback.
 *
 * Tears down everything msm_tsif_probe() set up, roughly in reverse
 * order: device list, wake lock, sysfs attrs, IRQ, debugfs, DMA,
 * GPIOs, register mapping, clocks, runtime PM, then the device struct.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1385
/* Runtime-PM suspend hook: logging only, no device state handled here. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1391
/* Runtime-PM resume hook: logging only, no device state handled here. */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1397
/* Runtime-PM callbacks wired into the platform driver below. */
static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1402
1403
1404static struct platform_driver msm_tsif_driver = {
1405 .probe = msm_tsif_probe,
1406 .remove = __exit_p(msm_tsif_remove),
1407 .driver = {
1408 .name = "msm_tsif",
1409 .pm = &tsif_dev_pm_ops,
1410 },
1411};
1412
1413static int __init mod_init(void)
1414{
1415 int rc = platform_driver_register(&msm_tsif_driver);
1416 if (rc)
1417 pr_err("TSIF: platform_driver_register failed: %d\n", rc);
1418 return rc;
1419}
1420
/* Module unload: unregister the platform driver. */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
1425/* ===module end=== */
1426
1427/* public API */
1428
Joel Nider5578bdb2011-08-12 09:37:11 +03001429int tsif_get_active(void)
1430{
1431 struct msm_tsif_device *tsif_device;
1432 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1433 return tsif_device->pdev->id;
1434 }
1435 return -ENODEV;
1436}
1437EXPORT_SYMBOL(tsif_get_active);
1438
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001439void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
1440{
1441 struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
Joel Nider5578bdb2011-08-12 09:37:11 +03001442 if (!tsif_device)
1443 return ERR_PTR(-ENODEV);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001444 if (tsif_device->client_notify || tsif_device->client_data)
1445 return ERR_PTR(-EBUSY);
1446 tsif_device->client_notify = notify;
1447 tsif_device->client_data = data;
1448 /* prevent from unloading */
1449 get_device(&tsif_device->pdev->dev);
1450 return tsif_device;
1451}
1452EXPORT_SYMBOL(tsif_attach);
1453
/*
 * tsif_detach - release a client attachment obtained via tsif_attach().
 *
 * Clears the client callback/data and drops the device reference taken
 * in tsif_attach(). @cookie is the value tsif_attach() returned.
 */
void tsif_detach(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->client_notify = NULL;
	tsif_device->client_data = NULL;
	put_device(&tsif_device->pdev->dev);
}
EXPORT_SYMBOL(tsif_detach);
1462
1463void tsif_get_info(void *cookie, void **pdata, int *psize)
1464{
1465 struct msm_tsif_device *tsif_device = cookie;
1466 if (pdata)
1467 *pdata = tsif_device->data_buffer;
1468 if (psize)
1469 *psize = TSIF_PKTS_IN_BUF;
1470}
1471EXPORT_SYMBOL(tsif_get_info);
1472
1473int tsif_set_mode(void *cookie, int mode)
1474{
1475 struct msm_tsif_device *tsif_device = cookie;
1476 if (tsif_device->state != tsif_state_stopped) {
1477 dev_err(&tsif_device->pdev->dev,
1478 "Can't change mode while device is active\n");
1479 return -EBUSY;
1480 }
1481 switch (mode) {
1482 case 1:
1483 case 2:
1484 case 3:
1485 tsif_device->mode = mode;
1486 break;
1487 default:
1488 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1489 return -EINVAL;
1490 }
1491 return 0;
1492}
1493EXPORT_SYMBOL(tsif_set_mode);
1494
1495int tsif_set_time_limit(void *cookie, u32 value)
1496{
1497 struct msm_tsif_device *tsif_device = cookie;
1498 if (tsif_device->state != tsif_state_stopped) {
1499 dev_err(&tsif_device->pdev->dev,
1500 "Can't change time limit while device is active\n");
1501 return -EBUSY;
1502 }
1503 if (value != (value & 0xFFFFFF)) {
1504 dev_err(&tsif_device->pdev->dev,
1505 "Invalid time limit (should be 24 bit): %#x\n", value);
1506 return -EINVAL;
1507 }
1508 tsif_device->time_limit = value;
1509 return 0;
1510}
1511EXPORT_SYMBOL(tsif_set_time_limit);
1512
1513int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1514{
1515 struct msm_tsif_device *tsif_device = cookie;
1516 if (tsif_device->data_buffer) {
1517 dev_err(&tsif_device->pdev->dev,
1518 "Data buffer already allocated: %p\n",
1519 tsif_device->data_buffer);
1520 return -EBUSY;
1521 }
1522 /* check for crazy user */
1523 if (pkts_in_chunk * chunks_in_buf > 10240) {
1524 dev_err(&tsif_device->pdev->dev,
1525 "Buffer requested is too large: %d * %d\n",
1526 pkts_in_chunk,
1527 chunks_in_buf);
1528 return -EINVAL;
1529 }
1530 /* parameters are OK, execute */
1531 tsif_device->pkts_per_chunk = pkts_in_chunk;
1532 tsif_device->chunks_per_buf = chunks_in_buf;
1533 return 0;
1534}
1535EXPORT_SYMBOL(tsif_set_buf_config);
1536
1537void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
1538{
1539 struct msm_tsif_device *tsif_device = cookie;
1540 if (ri)
1541 *ri = tsif_device->ri;
1542 if (wi)
1543 *wi = tsif_device->wi;
1544 if (state)
1545 *state = tsif_device->state;
1546}
1547EXPORT_SYMBOL(tsif_get_state);
1548
/* Public API: start streaming on the device behind @cookie (returned by
 * tsif_attach()); thin wrapper around action_open(). */
int tsif_start(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	return action_open(tsif_device);
}
EXPORT_SYMBOL(tsif_start);
1555
/* Public API: stop streaming on the device behind @cookie; thin wrapper
 * around action_close(). Return value of action_close() is discarded
 * (it always returns 0). */
void tsif_stop(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	action_close(tsif_device);
}
EXPORT_SYMBOL(tsif_stop);
1562
/* Public API: client reports it has consumed packets up to @read_index;
 * updates the device read index (ri). */
void tsif_reclaim_packets(void *cookie, int read_index)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->ri = read_index;
}
EXPORT_SYMBOL(tsif_reclaim_packets);
1569
/* module entry/exit points and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1576