blob: 7e59c989d989723fbed0404e603664c9c5683e76 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * TSIF Driver
3 *
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07004 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
Steve Mucklef132c6c2012-06-06 18:30:57 -070034#include <linux/gpio.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036#include <mach/dma.h>
37#include <mach/msm_tsif.h>
38
39/*
40 * TSIF register offsets
41 */
42#define TSIF_STS_CTL_OFF (0x0)
43#define TSIF_TIME_LIMIT_OFF (0x4)
44#define TSIF_CLK_REF_OFF (0x8)
45#define TSIF_LPBK_FLAGS_OFF (0xc)
46#define TSIF_LPBK_DATA_OFF (0x10)
47#define TSIF_TEST_CTL_OFF (0x14)
48#define TSIF_TEST_MODE_OFF (0x18)
49#define TSIF_TEST_RESET_OFF (0x1c)
50#define TSIF_TEST_EXPORT_OFF (0x20)
51#define TSIF_TEST_CURRENT_OFF (0x24)
52
53#define TSIF_DATA_PORT_OFF (0x100)
54
55/* bits for TSIF_STS_CTL register */
56#define TSIF_STS_CTL_EN_IRQ (1 << 28)
57#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
58#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
59#define TSIF_STS_CTL_OVERFLOW (1 << 25)
60#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
61#define TSIF_STS_CTL_TIMEOUT (1 << 23)
62#define TSIF_STS_CTL_INV_SYNC (1 << 21)
63#define TSIF_STS_CTL_INV_NULL (1 << 20)
64#define TSIF_STS_CTL_INV_ERROR (1 << 19)
65#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
66#define TSIF_STS_CTL_INV_DATA (1 << 17)
67#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
68#define TSIF_STS_CTL_SPARE (1 << 15)
69#define TSIF_STS_CTL_EN_NULL (1 << 11)
70#define TSIF_STS_CTL_EN_ERROR (1 << 10)
71#define TSIF_STS_CTL_LAST_BIT (1 << 9)
72#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
73#define TSIF_STS_CTL_EN_TCR (1 << 7)
74#define TSIF_STS_CTL_TEST_MODE (3 << 5)
75#define TSIF_STS_CTL_EN_DM (1 << 4)
76#define TSIF_STS_CTL_STOP (1 << 3)
77#define TSIF_STS_CTL_START (1 << 0)
78
79/*
80 * Data buffering parameters
81 *
82 * Data stored in cyclic buffer;
83 *
84 * Data organized in chunks of packets.
85 * One chunk processed at a time by the data mover
86 *
87 */
88#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
89#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
90#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
91#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
92#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
93#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
Joel Nider5578bdb2011-08-12 09:37:11 +030094#define TSIF_MAX_ID 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
96#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
97#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
98#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
99#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
100#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
101
/* used to create debugfs entries */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* debugfs file permission bits */
	int offset;		/* TSIF register offset backed by the file */
} debugfs_tsif_regs[] = {
	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset",   S_IWUSR,           TSIF_TEST_RESET_OFF},	/* write-only: reset trigger */
	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},	/* read-only */
	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},	/* owner read-only */
};
120
/* structures for Data Mover */
struct tsif_dmov_cmd {
	dmov_box box;		/* DM "box" transfer descriptor */
	dma_addr_t box_ptr;	/* physical address of @box, used as command pointer */
};
126
struct msm_tsif_device;

/* One outstanding Data Mover transfer (two are kept in flight) */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;		/* DM command header; completion callback lives here */
	struct msm_tsif_device *tsif_device;	/* back-pointer to owning device */
	int busy;				/* non-zero while queued on the Data Mover */
	int wi; /**< set device's write index after xfer */
};
135
/* Per-instance state of one TSIF controller */
struct msm_tsif_device {
	struct list_head devlist;	/* link in driver-global device list */
	struct platform_device *pdev;
	struct resource *memres;	/* register window resource */
	void __iomem *base;		/* mapped register base */
	unsigned int irq;
	int mode;			/* 1, 2, or 3 (manual/debugfs) - see tsif_start_hw() */
	u32 time_limit;			/* value written to TSIF_TIME_LIMIT_OFF */
	enum tsif_state state;
	struct wake_lock wake_lock;
	/* clocks; any handle may be NULL when not named in platform data */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;	/* per-device debugfs directory */
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf;	/* exposes data_buffer */
	/* DMA related */
	int dma;			/* Data Mover channel id */
	int crci;			/* CRCI flow-control id for the TSIF port */
	void *data_buffer;		/* cyclic packet buffer (coherent) */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;		/* see TSIF_PKTS_IN_CHUNK */
	u32 chunks_per_buf;		/* see TSIF_CHUNKS_IN_BUF */
	int ri;				/* read index (client side) */
	int wi;				/* write index (valid data boundary) */
	int dmwi; /**< DataMover write index */
	struct tsif_dmov_cmd *dmov_cmd[2];	/* one DM command per in-flight xfer */
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* reschedules DMA outside DM callback */
	struct tasklet_struct clocks_off;	/* turns clocks off outside IRQ context */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* packet status words of first/last packet in chunk */
	/* client */
	void *client_data;	/* opaque cookie passed back to client_notify */
	void (*client_notify)(void *client_data);
};
186
187/* ===clocks begin=== */
188
189static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
190{
191 if (tsif_device->tsif_clk) {
192 clk_put(tsif_device->tsif_clk);
193 tsif_device->tsif_clk = NULL;
194 }
195 if (tsif_device->tsif_pclk) {
196 clk_put(tsif_device->tsif_pclk);
197 tsif_device->tsif_pclk = NULL;
198 }
199
200 if (tsif_device->tsif_ref_clk) {
201 clk_put(tsif_device->tsif_ref_clk);
202 tsif_device->tsif_ref_clk = NULL;
203 }
204}
205
206static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
207{
208 struct msm_tsif_platform_data *pdata =
209 tsif_device->pdev->dev.platform_data;
210 int rc = 0;
211
212 if (pdata->tsif_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700213 tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
214 pdata->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700215 if (IS_ERR(tsif_device->tsif_clk)) {
216 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
217 pdata->tsif_clk);
218 rc = PTR_ERR(tsif_device->tsif_clk);
219 tsif_device->tsif_clk = NULL;
220 goto ret;
221 }
222 }
223 if (pdata->tsif_pclk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700224 tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
225 pdata->tsif_pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 if (IS_ERR(tsif_device->tsif_pclk)) {
227 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
228 pdata->tsif_pclk);
229 rc = PTR_ERR(tsif_device->tsif_pclk);
230 tsif_device->tsif_pclk = NULL;
231 goto ret;
232 }
233 }
234 if (pdata->tsif_ref_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700235 tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
236 pdata->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237 if (IS_ERR(tsif_device->tsif_ref_clk)) {
238 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
239 pdata->tsif_ref_clk);
240 rc = PTR_ERR(tsif_device->tsif_ref_clk);
241 tsif_device->tsif_ref_clk = NULL;
242 goto ret;
243 }
244 }
245 return 0;
246ret:
247 tsif_put_clocks(tsif_device);
248 return rc;
249}
250
251static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
252{
253 if (on) {
254 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300255 clk_prepare_enable(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300257 clk_prepare_enable(tsif_device->tsif_pclk);
258 clk_prepare_enable(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 } else {
260 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300261 clk_disable_unprepare(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300263 clk_disable_unprepare(tsif_device->tsif_pclk);
264 clk_disable_unprepare(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 }
266}
Joel Nider6682b382012-07-03 13:59:27 +0300267
/* Tasklet body for @clocks_off: turns clocks off outside IRQ context */
static void tsif_clocks_off(unsigned long data)
{
	tsif_clock((struct msm_tsif_device *)data, 0);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700273/* ===clocks end=== */
274/* ===gpio begin=== */
275
276static void tsif_gpios_free(const struct msm_gpio *table, int size)
277{
278 int i;
279 const struct msm_gpio *g;
280 for (i = size-1; i >= 0; i--) {
281 g = table + i;
282 gpio_free(GPIO_PIN(g->gpio_cfg));
283 }
284}
285
286static int tsif_gpios_request(const struct msm_gpio *table, int size)
287{
288 int rc;
289 int i;
290 const struct msm_gpio *g;
291 for (i = 0; i < size; i++) {
292 g = table + i;
293 rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
294 if (rc) {
295 pr_err("gpio_request(%d) <%s> failed: %d\n",
296 GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
297 goto err;
298 }
299 }
300 return 0;
301err:
302 tsif_gpios_free(table, i);
303 return rc;
304}
305
306static int tsif_gpios_disable(const struct msm_gpio *table, int size)
307{
308 int rc = 0;
309 int i;
310 const struct msm_gpio *g;
311 for (i = size-1; i >= 0; i--) {
312 int tmp;
313 g = table + i;
Joel Nider951b2832012-05-07 21:13:38 +0300314 tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
315 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
316 GPIO_CFG_DISABLE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700317 if (tmp) {
318 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
319 " <%s> failed: %d\n",
320 g->gpio_cfg, g->label ?: "?", rc);
321 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
322 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
323 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
324 GPIO_DRVSTR(g->gpio_cfg));
325 if (!rc)
326 rc = tmp;
327 }
328 }
329
330 return rc;
331}
332
333static int tsif_gpios_enable(const struct msm_gpio *table, int size)
334{
335 int rc;
336 int i;
337 const struct msm_gpio *g;
338 for (i = 0; i < size; i++) {
339 g = table + i;
340 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
341 if (rc) {
342 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
343 " <%s> failed: %d\n",
344 g->gpio_cfg, g->label ?: "?", rc);
345 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
346 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
347 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
348 GPIO_DRVSTR(g->gpio_cfg));
349 goto err;
350 }
351 }
352 return 0;
353err:
354 tsif_gpios_disable(table, i);
355 return rc;
356}
357
/* Request then enable all GPIOs; frees them again if enabling fails */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	int rc = tsif_gpios_request(table, size);

	if (!rc) {
		rc = tsif_gpios_enable(table, size);
		if (rc)
			tsif_gpios_free(table, size);
	}
	return rc;
}
368
/* Park and then release every GPIO in @table */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
	tsif_gpios_free(table, size);
}
374
375static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
376{
377 struct msm_tsif_platform_data *pdata =
378 tsif_device->pdev->dev.platform_data;
379 return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
380}
381
382static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
383{
384 struct msm_tsif_platform_data *pdata =
385 tsif_device->pdev->dev.platform_data;
386 tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
387}
388
389/* ===gpio end=== */
390
391static int tsif_start_hw(struct msm_tsif_device *tsif_device)
392{
393 u32 ctl = TSIF_STS_CTL_EN_IRQ |
394 TSIF_STS_CTL_EN_TIME_LIM |
395 TSIF_STS_CTL_EN_TCR |
396 TSIF_STS_CTL_EN_DM;
397 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
398 switch (tsif_device->mode) {
399 case 1: /* mode 1 */
400 ctl |= (0 << 5);
401 break;
402 case 2: /* mode 2 */
403 ctl |= (1 << 5);
404 break;
405 case 3: /* manual - control from debugfs */
406 return 0;
407 break;
408 default:
409 return -EINVAL;
410 }
411 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
412 iowrite32(tsif_device->time_limit,
413 tsif_device->base + TSIF_TIME_LIMIT_OFF);
414 wmb();
415 iowrite32(ctl | TSIF_STS_CTL_START,
416 tsif_device->base + TSIF_STS_CTL_OFF);
417 wmb();
418 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
419 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
420}
421
/* Request the TSIF core to stop; wmb() orders the write vs later register ops */
static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
{
	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb();
}
427
428/* ===DMA begin=== */
429/**
430 * TSIF DMA theory of operation
431 *
432 * Circular memory buffer \a tsif_mem_buffer allocated;
433 * 4 pointers points to and moved forward on:
434 * - \a ri index of first ready to read packet.
435 * Updated by client's call to tsif_reclaim_packets()
436 * - \a wi points to the next packet to be written by DM.
437 * Data below is valid and will not be overriden by DMA.
438 * Moved on DM callback
439 * - \a dmwi points to the next packet not scheduled yet for DM
440 * moved when packet scheduled for DM
441 *
442 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
443 * at time immediately after scheduling.
444 *
445 * Initially, 2 packets get scheduled for the DM.
446 *
447 * Upon packet receive, DM writes packet to the pre-programmed
448 * location and invoke its callback.
449 *
450 * DM callback moves sets wi pointer to \a xfer->wi;
451 * then it schedules next packet for DM and moves \a dmwi pointer.
452 *
453 * Buffer overflow handling
454 *
455 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
456 * DMA re-scheduled to the same index.
457 * Callback check and not move \a wi to become equal to \a ri
458 *
459 * On \a read request, data between \a ri and \a wi pointers may be read;
460 * \ri pointer moved accordingly.
461 *
462 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
463 * \a wi is between [\a ri, \a dmwi]
464 *
465 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
466 *
467 * Number of scheduled packets for DM: (dmwi-wi)
468 */
469
470/**
471 * tsif_dma_schedule - schedule DMA transfers
472 *
473 * @tsif_device: device
474 *
475 * Executed from process context on init, or from tasklet when
476 * re-scheduling upon DMA completion.
477 * This prevent concurrent execution from several CPU's
478 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		/* program the DM destination at the current DM write index */
		dmwi0 = tsif_device->dmwi;
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* remembered by the completion callback to update device wi */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
522
523/**
524 * tsif_dmov_complete_func - DataMover completion callback
525 *
526 * @cmd: original DM command
527 * @result: DM result
528 * @err: optional error buffer
529 *
530 * Executed in IRQ context (Data Mover's IRQ)
531 * DataMover's spinlock @msm_dmov_lock held.
532 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	/* index of this xfer within the device's 2-entry xfer array */
	i = xfer - tsif_device->xfer;
	/* byte offset of this chunk within the cyclic data buffer */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * This branch is taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happen for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 * Clocks are turned off from outside the
				 * interrupt context.
				 */
				tasklet_schedule(&tsif_device->clocks_off);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	/* publish this xfer's data to the client, then release the slot */
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
660
661/**
662 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
663 *
664 * @data: tsif_device
665 *
666 * Reschedule DMA requests
667 *
668 * Executed in tasklet
669 */
670static void tsif_dma_refill(unsigned long data)
671{
672 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
673 if (tsif_device->state == tsif_state_running)
674 tsif_dma_schedule(tsif_device);
675}
676
677/**
678 * tsif_dma_flush - flush DMA channel
679 *
680 * @tsif_device:
681 *
682 * busy wait till DMA flushed
683 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		/*
		 * Busy-wait: the DM completion callback
		 * (tsif_dmov_complete_func) clears xfer[].busy when the
		 * flush result arrives.
		 */
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			msm_dmov_flush(tsif_device->dma, 1);
			usleep(10000);
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
698
699static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
700{
701 int i;
702 tsif_device->state = tsif_state_flushing;
703 tasklet_kill(&tsif_device->dma_refill);
704 tsif_dma_flush(tsif_device);
705 for (i = 0; i < 2; i++) {
706 if (tsif_device->dmov_cmd[i]) {
707 dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
708 tsif_device->dmov_cmd[i],
709 tsif_device->dmov_cmd_dma[i]);
710 tsif_device->dmov_cmd[i] = NULL;
711 }
712 }
713 if (tsif_device->data_buffer) {
714 tsif_device->blob_wrapper_databuf.data = NULL;
715 tsif_device->blob_wrapper_databuf.size = 0;
716 dma_free_coherent(NULL, TSIF_BUF_SIZE,
717 tsif_device->data_buffer,
718 tsif_device->data_buffer_dma);
719 tsif_device->data_buffer = NULL;
720 }
721}
722
/*
 * tsif_dma_init - allocate the cyclic data buffer and the two DM commands
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (after
 * cleaning up via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	/* expose the data buffer through debugfs */
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	/* reset all buffer indices */
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		/* box transfer from the TSIF data port, flow-controlled by CRCI */
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* src does not advance (FIFO port); dst advances by packet */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		/* command pointer list: one entry pointing at the box */
		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
					    offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	/* start from a clean DM channel */
	msm_dmov_flush(tsif_device->dma, 1);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
777
778/* ===DMA end=== */
779
780/* ===IRQ begin=== */
781
782static irqreturn_t tsif_irq(int irq, void *dev_id)
783{
784 struct msm_tsif_device *tsif_device = dev_id;
785 u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
786 if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
787 TSIF_STS_CTL_OVERFLOW |
788 TSIF_STS_CTL_LOST_SYNC |
789 TSIF_STS_CTL_TIMEOUT))) {
790 dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
791 return IRQ_NONE;
792 }
793 if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
794 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
795 tsif_device->stat_rx++;
796 }
797 if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
798 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
799 tsif_device->stat_overflow++;
800 }
801 if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
802 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
803 tsif_device->stat_lost_sync++;
804 }
805 if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
806 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
807 tsif_device->stat_timeout++;
808 }
809 iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
810 wmb();
811 return IRQ_HANDLED;
812}
813
814/* ===IRQ end=== */
815
816/* ===Device attributes begin=== */
817
/* sysfs "stats" read: dump device state, counters and clock registers */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
883/**
884 * set_stats - reset statistics on write
885 *
886 * @dev:
887 * @attr:
888 * @buf:
889 * @count:
890 */
891static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
892 const char *buf, size_t count)
893{
894 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
895 tsif_device->stat_rx = 0;
896 tsif_device->stat_overflow = 0;
897 tsif_device->stat_lost_sync = 0;
898 tsif_device->stat_timeout = 0;
899 tsif_device->stat_dmov_err = 0;
900 tsif_device->stat_soft_drop = 0;
901 tsif_device->stat_ifi = 0;
902 return count;
903}
904static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
905
906static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
907 char *buf)
908{
909 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
910 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
911}
912
913static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
914 const char *buf, size_t count)
915{
916 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
917 int value;
918 int rc;
919 if (1 != sscanf(buf, "%d", &value)) {
920 dev_err(&tsif_device->pdev->dev,
921 "Failed to parse integer: <%s>\n", buf);
922 return -EINVAL;
923 }
924 rc = tsif_set_mode(tsif_device, value);
925 if (!rc)
926 rc = count;
927 return rc;
928}
929static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
930
931static ssize_t show_time_limit(struct device *dev,
932 struct device_attribute *attr,
933 char *buf)
934{
935 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
936 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
937}
938
939static ssize_t set_time_limit(struct device *dev,
940 struct device_attribute *attr,
941 const char *buf, size_t count)
942{
943 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
944 int value;
945 int rc;
946 if (1 != sscanf(buf, "%d", &value)) {
947 dev_err(&tsif_device->pdev->dev,
948 "Failed to parse integer: <%s>\n", buf);
949 return -EINVAL;
950 }
951 rc = tsif_set_time_limit(tsif_device, value);
952 if (!rc)
953 rc = count;
954 return rc;
955}
956static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
957 show_time_limit, set_time_limit);
958
959static ssize_t show_buf_config(struct device *dev,
960 struct device_attribute *attr,
961 char *buf)
962{
963 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
964 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
965 tsif_device->pkts_per_chunk,
966 tsif_device->chunks_per_buf);
967}
968
969static ssize_t set_buf_config(struct device *dev,
970 struct device_attribute *attr,
971 const char *buf, size_t count)
972{
973 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
974 u32 p, c;
975 int rc;
976 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
977 dev_err(&tsif_device->pdev->dev,
978 "Failed to parse integer: <%s>\n", buf);
979 return -EINVAL;
980 }
981 rc = tsif_set_buf_config(tsif_device, p, c);
982 if (!rc)
983 rc = count;
984 return rc;
985}
986static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
987 show_buf_config, set_buf_config);
988
/* sysfs attributes exported for each TSIF device */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	NULL,	/* sentinel */
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
999/* ===Device attributes end=== */
1000
1001/* ===debugfs begin=== */
1002
/*
 * debugfs write accessor: store @val to the I/O-mapped TSIF register
 * whose __iomem address was passed as the file's @data pointer.
 * wmb() orders the register write before subsequent memory accesses.
 */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb();
	return 0;
}
1009
/* debugfs read accessor: fetch the 32-bit register mapped at @data. */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}
1015
/* 32-bit hex register file built on the get/set accessors above */
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

/*
 * Create one debugfs file exposing a 32-bit I/O-mapped register.
 * @value is the register's __iomem address, stashed as the file data.
 */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}
1024
/*
 * Bring the TSIF device from stopped to running: init DMA, enable the
 * input GPIOs, IRQ and clocks, schedule DMA, then start the TSIF core
 * and take a runtime-PM reference plus a wake lock.
 * Returns 0 on success, -EAGAIN if the device is not currently stopped,
 * or a negative errno from a failed init step.
 */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;
	int result;

	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	/*
	 * NOTE(review): state goes to running before the steps below have
	 * succeeded, and the error paths return without visibly restoring
	 * it (unless tsif_dma_exit() does so) - confirm a failed open
	 * cannot leave the device stuck returning -EAGAIN.
	 */
	tsif_device->state = tsif_state_running;

	/* make sure the GPIO's are set up */
	rc = tsif_start_gpios(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
		tsif_dma_exit(tsif_device);
		return rc;
	}

	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		/* NOTE(review): the IRQ enabled above is not disabled here */
		tsif_stop_gpios(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		return rc;
	}

	/*
	 * NOTE(review): if pm_runtime_get() fails we return with HW, DMA,
	 * clocks and IRQ still active - no teardown mirrors the branch above.
	 */
	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}

	wake_lock(&tsif_device->wake_lock);
	return rc;
}
1082
/*
 * Stop an active TSIF session: gate the input at the GPIOs, give the ADM
 * time to drain, then shut down the core, DMA, clocks and IRQ, and drop
 * the runtime-PM reference and wake lock taken in action_open().
 * Always returns 0.
 * NOTE(review): the device state is presumably reset to stopped inside
 * tsif_stop_hw()/tsif_dma_exit() - not visible from here, confirm.
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);

	/* turn off the GPIO's to prevent new data from entering */
	tsif_stop_gpios(tsif_device);

	/* we unfortunately must sleep here to give the ADM time to
	 * complete any outstanding reads after the GPIO's are turned
	 * off. There is no indication from the ADM hardware that
	 * there are any outstanding reads on the bus, and if we
	 * stop the TSIF too quickly, it can cause a bus error.
	 */
	msleep(100);

	/* now we can stop the core */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1109
1110
/* command table for the debugfs "action" file (see action_write below) */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1118
/*
 * debugfs "action" write handler: match the written string against the
 * actions[] table ("open"/"close") and run the matching transition.
 * Returns @count on success, the action's negative errno on failure, or
 * -EINVAL for an unrecognized command.
 */
static ssize_t tsif_debugfs_action_write(struct file *filp,
					 const char __user *userbuf,
					 size_t count, loff_t *f_pos)
{
	int i;
	struct msm_tsif_device *tsif_device = filp->private_data;
	char s[40];
	int len = min(sizeof(s) - 1, count);
	if (copy_from_user(s, userbuf, len))
		return -EFAULT;
	s[len] = '\0';
	dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
	for (i = 0; i < ARRAY_SIZE(actions); i++) {
		/*
		 * Compare at most strlen(name) bytes so a trailing newline
		 * from "echo" still matches; as a side effect any short
		 * write that is a prefix (e.g. "o") also matches.
		 */
		if (!strncmp(s, actions[i].name,
			min(count, strlen(actions[i].name)))) {
			int rc = actions[i].func(tsif_device);
			if (!rc)
				rc = count;
			return rc;
		}
	}
	return -EINVAL;
}
1142
/* Stash the inode's private pointer (the tsif device) for the handlers. */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

/* debugfs "action" file: write-only command interface ("open"/"close") */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1153
1154static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1155 size_t count, loff_t *f_pos)
1156{
1157 static char bufa[200];
1158 static char *buf = bufa;
1159 int sz = sizeof(bufa);
1160 struct msm_tsif_device *tsif_device = filp->private_data;
1161 int len = 0;
1162 if (tsif_device) {
1163 int i;
1164 len += snprintf(buf + len, sz - len,
1165 "ri %3d | wi %3d | dmwi %3d |",
1166 tsif_device->ri, tsif_device->wi,
1167 tsif_device->dmwi);
1168 for (i = 0; i < 2; i++) {
1169 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1170 if (xfer->busy) {
1171 u32 dst =
1172 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1173 u32 base = tsif_device->data_buffer_dma;
1174 int w = (dst - base) / TSIF_PKT_SIZE;
1175 len += snprintf(buf + len, sz - len,
1176 " [%3d]{%3d}",
1177 w, xfer->wi);
1178 } else {
1179 len += snprintf(buf + len, sz - len,
1180 " ---idle---");
1181 }
1182 }
1183 len += snprintf(buf + len, sz - len, "\n");
1184 } else {
1185 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1186 }
1187 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1188}
1189
/* debugfs "dma" file: read-only DMA state snapshot */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1194
1195static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1196 size_t count, loff_t *f_pos)
1197{
1198 static char bufa[300];
1199 static char *buf = bufa;
1200 int sz = sizeof(bufa);
1201 struct msm_tsif_device *tsif_device = filp->private_data;
1202 int len = 0;
1203 if (tsif_device) {
1204 struct msm_tsif_platform_data *pdata =
1205 tsif_device->pdev->dev.platform_data;
1206 int i;
1207 for (i = 0; i < pdata->num_gpios; i++) {
1208 if (pdata->gpios[i].gpio_cfg) {
1209 int x = !!gpio_get_value(GPIO_PIN(
1210 pdata->gpios[i].gpio_cfg));
1211 len += snprintf(buf + len, sz - len,
1212 "%15s: %d\n",
1213 pdata->gpios[i].label, x);
1214 }
1215 }
1216 } else {
1217 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1218 }
1219 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1220}
1221
/* debugfs "gpios" file: read-only GPIO level report */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1226
1227
/*
 * Create the per-device debugfs tree: one x32 file per TSIF register
 * (driven by the debugfs_tsif_regs[] table defined earlier in this file),
 * plus "gpios", "action", "dma" and the raw "data_buf" blob.
 * Failure to create the directory is tolerated: dent_tsif stays NULL and
 * all files are skipped.
 */
static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
{
	tsif_device->dent_tsif = debugfs_create_dir(
		dev_name(&tsif_device->pdev->dev), NULL);
	if (tsif_device->dent_tsif) {
		int i;
		void __iomem *base = tsif_device->base;
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
			tsif_device->debugfs_tsif_regs[i] =
				debugfs_create_iomem_x32(
					debugfs_tsif_regs[i].name,
					debugfs_tsif_regs[i].mode,
					tsif_device->dent_tsif,
					base + debugfs_tsif_regs[i].offset);
		}
		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
		tsif_device->debugfs_action = debugfs_create_file("action",
			S_IWUSR,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
		tsif_device->debugfs_dma = debugfs_create_file("dma",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
			S_IRUGO,
			tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
	}
}
1257
/*
 * Tear down the debugfs tree created by tsif_debugfs_init() and clear
 * the cached dentry pointers (the recursive remove frees them all).
 */
static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->dent_tsif) {
		int i;
		debugfs_remove_recursive(tsif_device->dent_tsif);
		tsif_device->dent_tsif = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
			tsif_device->debugfs_tsif_regs[i] = NULL;
		tsif_device->debugfs_gpio = NULL;
		tsif_device->debugfs_action = NULL;
		tsif_device->debugfs_dma = NULL;
		tsif_device->debugfs_databuf = NULL;
	}
}
1272/* ===debugfs end=== */
1273
1274/* ===module begin=== */
/* all probed TSIF devices, linked via msm_tsif_device.devlist */
static LIST_HEAD(tsif_devices);

/* Look up a probed TSIF device by platform-device id; NULL if absent. */
static struct msm_tsif_device *tsif_find_by_id(int id)
{
	struct msm_tsif_device *tsif_device;
	list_for_each_entry(tsif_device, &tsif_devices, devlist) {
		if (tsif_device->pdev->id == id)
			return tsif_device;
	}
	return NULL;
}
1286
1287static int __devinit msm_tsif_probe(struct platform_device *pdev)
1288{
1289 int rc = -ENODEV;
1290 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1291 struct msm_tsif_device *tsif_device;
1292 struct resource *res;
1293 /* check device validity */
1294 /* must have platform data */
1295 if (!plat) {
1296 dev_err(&pdev->dev, "Platform data not available\n");
1297 rc = -EINVAL;
1298 goto out;
1299 }
Joel Nider5578bdb2011-08-12 09:37:11 +03001300
1301 if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001302 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1303 rc = -EINVAL;
1304 goto out;
1305 }
1306 /* OK, we will use this device */
1307 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1308 if (!tsif_device) {
1309 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1310 rc = -ENOMEM;
1311 goto out;
1312 }
1313 /* cross links */
1314 tsif_device->pdev = pdev;
1315 platform_set_drvdata(pdev, tsif_device);
1316 tsif_device->mode = 1;
1317 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1318 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1319 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1320 (unsigned long)tsif_device);
Joel Nider6682b382012-07-03 13:59:27 +03001321 tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
1322 (unsigned long)tsif_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001323 if (tsif_get_clocks(tsif_device))
1324 goto err_clocks;
1325/* map I/O memory */
1326 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1327 if (!tsif_device->memres) {
1328 dev_err(&pdev->dev, "Missing MEM resource\n");
1329 rc = -ENXIO;
1330 goto err_rgn;
1331 }
1332 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1333 if (!res) {
1334 dev_err(&pdev->dev, "Missing DMA resource\n");
1335 rc = -ENXIO;
1336 goto err_rgn;
1337 }
1338 tsif_device->dma = res->start;
1339 tsif_device->crci = res->end;
1340 tsif_device->base = ioremap(tsif_device->memres->start,
1341 resource_size(tsif_device->memres));
1342 if (!tsif_device->base) {
1343 dev_err(&pdev->dev, "ioremap failed\n");
1344 goto err_ioremap;
1345 }
1346 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1347 tsif_device->memres->start, tsif_device->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001348
1349 pm_runtime_set_active(&pdev->dev);
1350 pm_runtime_enable(&pdev->dev);
1351
1352 tsif_debugfs_init(tsif_device);
1353 rc = platform_get_irq(pdev, 0);
1354 if (rc > 0) {
1355 tsif_device->irq = rc;
1356 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1357 dev_name(&pdev->dev), tsif_device);
1358 disable_irq(tsif_device->irq);
1359 }
1360 if (rc) {
1361 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1362 tsif_device->irq, rc);
1363 goto err_irq;
1364 }
1365 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1366 if (rc) {
1367 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1368 goto err_attrs;
1369 }
1370 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1371 dev_name(&pdev->dev));
1372 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1373 tsif_device->irq, tsif_device->memres->start,
1374 tsif_device->dma, tsif_device->crci);
1375 list_add(&tsif_device->devlist, &tsif_devices);
1376 return 0;
1377/* error path */
1378 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1379err_attrs:
1380 free_irq(tsif_device->irq, tsif_device);
1381err_irq:
1382 tsif_debugfs_exit(tsif_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001383 iounmap(tsif_device->base);
1384err_ioremap:
1385err_rgn:
1386 tsif_put_clocks(tsif_device);
1387err_clocks:
1388 kfree(tsif_device);
1389out:
1390 return rc;
1391}
1392
/*
 * Remove one TSIF platform device, releasing everything acquired in
 * msm_tsif_probe() in roughly reverse order.  Always returns 0.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	/*
	 * NOTE(review): probe() never calls pm_runtime_get(), so this put
	 * looks unbalanced unless it intentionally counters the initial
	 * pm_runtime_set_active() usage count - confirm.
	 */
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1412
/* Runtime-PM suspend hook: nothing to do, just trace. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1418
/* Runtime-PM resume hook: nothing to do, just trace. */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1424
/* runtime-PM callbacks; both are trace-only no-ops */
static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1429
1430
1431static struct platform_driver msm_tsif_driver = {
1432 .probe = msm_tsif_probe,
1433 .remove = __exit_p(msm_tsif_remove),
1434 .driver = {
1435 .name = "msm_tsif",
1436 .pm = &tsif_dev_pm_ops,
1437 },
1438};
1439
1440static int __init mod_init(void)
1441{
1442 int rc = platform_driver_register(&msm_tsif_driver);
1443 if (rc)
1444 pr_err("TSIF: platform_driver_register failed: %d\n", rc);
1445 return rc;
1446}
1447
/* Module exit point: unregister the TSIF platform driver. */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
1452/* ===module end=== */
1453
1454/* public API */
1455
Joel Nider5578bdb2011-08-12 09:37:11 +03001456int tsif_get_active(void)
1457{
1458 struct msm_tsif_device *tsif_device;
1459 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1460 return tsif_device->pdev->id;
1461 }
1462 return -ENODEV;
1463}
1464EXPORT_SYMBOL(tsif_get_active);
1465
/*
 * Attach the single client to TSIF device @id.
 * @notify: callback invoked by the driver with @data as its argument.
 * Returns a cookie for the other tsif_* API calls, ERR_PTR(-ENODEV) for
 * an unknown id, or ERR_PTR(-EBUSY) if a client is already attached.
 * Holds a device reference until tsif_detach().
 */
void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
{
	struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
	if (!tsif_device)
		return ERR_PTR(-ENODEV);
	if (tsif_device->client_notify || tsif_device->client_data)
		return ERR_PTR(-EBUSY);
	tsif_device->client_notify = notify;
	tsif_device->client_data = data;
	/* prevent from unloading */
	get_device(&tsif_device->pdev->dev);
	return tsif_device;
}
EXPORT_SYMBOL(tsif_attach);
1480
/* Detach the client attached via tsif_attach() and drop the device ref. */
void tsif_detach(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->client_notify = NULL;
	tsif_device->client_data = NULL;
	put_device(&tsif_device->pdev->dev);
}
EXPORT_SYMBOL(tsif_detach);
1489
/*
 * Report the client-visible data buffer.
 * @pdata: out, buffer virtual address (may still be NULL before start).
 * @psize: out, buffer capacity in packets (TSIF_PKTS_IN_BUF).
 * Either out pointer may be NULL to skip that item.
 */
void tsif_get_info(void *cookie, void **pdata, int *psize)
{
	struct msm_tsif_device *tsif_device = cookie;
	if (pdata)
		*pdata = tsif_device->data_buffer;
	if (psize)
		*psize = TSIF_PKTS_IN_BUF;
}
EXPORT_SYMBOL(tsif_get_info);
1499
1500int tsif_set_mode(void *cookie, int mode)
1501{
1502 struct msm_tsif_device *tsif_device = cookie;
1503 if (tsif_device->state != tsif_state_stopped) {
1504 dev_err(&tsif_device->pdev->dev,
1505 "Can't change mode while device is active\n");
1506 return -EBUSY;
1507 }
1508 switch (mode) {
1509 case 1:
1510 case 2:
1511 case 3:
1512 tsif_device->mode = mode;
1513 break;
1514 default:
1515 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1516 return -EINVAL;
1517 }
1518 return 0;
1519}
1520EXPORT_SYMBOL(tsif_set_mode);
1521
1522int tsif_set_time_limit(void *cookie, u32 value)
1523{
1524 struct msm_tsif_device *tsif_device = cookie;
1525 if (tsif_device->state != tsif_state_stopped) {
1526 dev_err(&tsif_device->pdev->dev,
1527 "Can't change time limit while device is active\n");
1528 return -EBUSY;
1529 }
1530 if (value != (value & 0xFFFFFF)) {
1531 dev_err(&tsif_device->pdev->dev,
1532 "Invalid time limit (should be 24 bit): %#x\n", value);
1533 return -EINVAL;
1534 }
1535 tsif_device->time_limit = value;
1536 return 0;
1537}
1538EXPORT_SYMBOL(tsif_set_time_limit);
1539
1540int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1541{
1542 struct msm_tsif_device *tsif_device = cookie;
1543 if (tsif_device->data_buffer) {
1544 dev_err(&tsif_device->pdev->dev,
1545 "Data buffer already allocated: %p\n",
1546 tsif_device->data_buffer);
1547 return -EBUSY;
1548 }
1549 /* check for crazy user */
1550 if (pkts_in_chunk * chunks_in_buf > 10240) {
1551 dev_err(&tsif_device->pdev->dev,
1552 "Buffer requested is too large: %d * %d\n",
1553 pkts_in_chunk,
1554 chunks_in_buf);
1555 return -EINVAL;
1556 }
1557 /* parameters are OK, execute */
1558 tsif_device->pkts_per_chunk = pkts_in_chunk;
1559 tsif_device->chunks_per_buf = chunks_in_buf;
1560 return 0;
1561}
1562EXPORT_SYMBOL(tsif_set_buf_config);
1563
/*
 * Snapshot the read index, write index and device state.
 * Each out pointer may be NULL to skip that item.
 */
void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
{
	struct msm_tsif_device *tsif_device = cookie;
	if (ri)
		*ri = tsif_device->ri;
	if (wi)
		*wi = tsif_device->wi;
	if (state)
		*state = tsif_device->state;
}
EXPORT_SYMBOL(tsif_get_state);
1575
/* Start streaming; thin wrapper around action_open() (see its contract). */
int tsif_start(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	return action_open(tsif_device);
}
EXPORT_SYMBOL(tsif_start);
1582
/* Stop streaming; thin wrapper around action_close(). */
void tsif_stop(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	action_close(tsif_device);
}
EXPORT_SYMBOL(tsif_stop);
1589
/*
 * Client informs the driver how far it has consumed the buffer by
 * advancing the read index, freeing those packets for reuse by DMA.
 */
void tsif_reclaim_packets(void *cookie, int read_index)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->ri = read_index;
}
EXPORT_SYMBOL(tsif_reclaim_packets);
1596
/* module entry/exit points and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1603