blob: aeda38c467058f87857339c5ad9f0709b8966241 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * TSIF Driver
3 *
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07004 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
Steve Mucklef132c6c2012-06-06 18:30:57 -070034#include <linux/gpio.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036#include <mach/dma.h>
37#include <mach/msm_tsif.h>
38
39/*
40 * TSIF register offsets
41 */
42#define TSIF_STS_CTL_OFF (0x0)
43#define TSIF_TIME_LIMIT_OFF (0x4)
44#define TSIF_CLK_REF_OFF (0x8)
45#define TSIF_LPBK_FLAGS_OFF (0xc)
46#define TSIF_LPBK_DATA_OFF (0x10)
47#define TSIF_TEST_CTL_OFF (0x14)
48#define TSIF_TEST_MODE_OFF (0x18)
49#define TSIF_TEST_RESET_OFF (0x1c)
50#define TSIF_TEST_EXPORT_OFF (0x20)
51#define TSIF_TEST_CURRENT_OFF (0x24)
52
53#define TSIF_DATA_PORT_OFF (0x100)
54
55/* bits for TSIF_STS_CTL register */
56#define TSIF_STS_CTL_EN_IRQ (1 << 28)
57#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
58#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
59#define TSIF_STS_CTL_OVERFLOW (1 << 25)
60#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
61#define TSIF_STS_CTL_TIMEOUT (1 << 23)
62#define TSIF_STS_CTL_INV_SYNC (1 << 21)
63#define TSIF_STS_CTL_INV_NULL (1 << 20)
64#define TSIF_STS_CTL_INV_ERROR (1 << 19)
65#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
66#define TSIF_STS_CTL_INV_DATA (1 << 17)
67#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
68#define TSIF_STS_CTL_SPARE (1 << 15)
69#define TSIF_STS_CTL_EN_NULL (1 << 11)
70#define TSIF_STS_CTL_EN_ERROR (1 << 10)
71#define TSIF_STS_CTL_LAST_BIT (1 << 9)
72#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
73#define TSIF_STS_CTL_EN_TCR (1 << 7)
74#define TSIF_STS_CTL_TEST_MODE (3 << 5)
75#define TSIF_STS_CTL_EN_DM (1 << 4)
76#define TSIF_STS_CTL_STOP (1 << 3)
77#define TSIF_STS_CTL_START (1 << 0)
78
79/*
80 * Data buffering parameters
81 *
82 * Data stored in cyclic buffer;
83 *
84 * Data organized in chunks of packets.
85 * One chunk processed at a time by the data mover
86 *
87 */
88#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
89#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
90#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
91#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
92#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
93#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
Joel Nider5578bdb2011-08-12 09:37:11 +030094#define TSIF_MAX_ID 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
96#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
97#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
98#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
99#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
100#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
101
/* used to create debugfs entries */
/* One debugfs file per TSIF register; mode selects read/write exposure. */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* debugfs file permissions */
	int offset;		/* register offset from tsif_device->base */
} debugfs_tsif_regs[] = {
	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},
	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
};
120
/* structures for Data Mover */
/*
 * One DM command set: a single box-mode transfer descriptor plus the
 * command-pointer word that references it.  Allocated DMA-coherent so
 * the Data Mover hardware can read it directly.
 */
struct tsif_dmov_cmd {
	dmov_box box;		/* box transfer descriptor */
	dma_addr_t box_ptr;	/* CMD_PTR_LP word pointing at @box */
};
126
struct msm_tsif_device;

/* Per-outstanding-DMA-transfer bookkeeping (two are kept in flight). */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;		/* DM command header; completion ctx */
	struct msm_tsif_device *tsif_device;	/* back-pointer for the callback */
	int busy;				/* nonzero while queued on the DM */
	int wi; /**< set devices's write index after xfer */
};
135
/* Per-instance driver state for one TSIF core. */
struct msm_tsif_device {
	struct list_head devlist;	/* link in the driver's device list */
	struct platform_device *pdev;
	struct resource *memres;	/* register window resource */
	void __iomem *base;		/* mapped TSIF registers */
	unsigned int irq;
	int mode;			/* 1 or 2 = hw modes; 3 = manual via debugfs */
	u32 time_limit;			/* written to TSIF_TIME_LIMIT register */
	enum tsif_state state;		/* stopped / running / flushing */
	struct wake_lock wake_lock;	/* held while the device is open */
	/* clocks */
	struct clk *tsif_clk;		/* optional; NULL when pdata omits it */
	struct clk *tsif_pclk;		/* optional; NULL when pdata omits it */
	struct clk *tsif_ref_clk;	/* optional; NULL when pdata omits it */
	/* debugfs */
	struct dentry *dent_tsif;	/* per-device debugfs directory */
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf; /* exposes data_buffer */
	/* DMA related */
	int dma;			/* Data Mover channel number */
	int crci;			/* DM flow-control (CRCI) id */
	void *data_buffer;		/* cyclic packet buffer, CPU address */
	dma_addr_t data_buffer_dma;	/* cyclic packet buffer, bus address */
	u32 pkts_per_chunk;		/* TSIF_PKTS_IN_CHUNK */
	u32 chunks_per_buf;		/* TSIF_CHUNKS_IN_BUF */
	int ri;				/* read index: first packet client may read */
	int wi;				/* write index: next packet DM will complete */
	int dmwi; /**< DataMover write index */
	struct tsif_dmov_cmd *dmov_cmd[2];	/* DM descriptors, coherent mem */
	dma_addr_t dmov_cmd_dma[2];		/* bus addresses of the above */
	struct tsif_xfer xfer[2];		/* the two in-flight transfers */
	struct tasklet_struct dma_refill;	/* reschedules DMA after completion */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* status words of first/last packet in chunk */
	/* client */
	void *client_data;	/* opaque cookie passed back to client_notify */
	void (*client_notify)(void *client_data);
};
185
186/* ===clocks begin=== */
187
188static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
189{
190 if (tsif_device->tsif_clk) {
191 clk_put(tsif_device->tsif_clk);
192 tsif_device->tsif_clk = NULL;
193 }
194 if (tsif_device->tsif_pclk) {
195 clk_put(tsif_device->tsif_pclk);
196 tsif_device->tsif_pclk = NULL;
197 }
198
199 if (tsif_device->tsif_ref_clk) {
200 clk_put(tsif_device->tsif_ref_clk);
201 tsif_device->tsif_ref_clk = NULL;
202 }
203}
204
205static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
206{
207 struct msm_tsif_platform_data *pdata =
208 tsif_device->pdev->dev.platform_data;
209 int rc = 0;
210
211 if (pdata->tsif_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700212 tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
213 pdata->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214 if (IS_ERR(tsif_device->tsif_clk)) {
215 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
216 pdata->tsif_clk);
217 rc = PTR_ERR(tsif_device->tsif_clk);
218 tsif_device->tsif_clk = NULL;
219 goto ret;
220 }
221 }
222 if (pdata->tsif_pclk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700223 tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
224 pdata->tsif_pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225 if (IS_ERR(tsif_device->tsif_pclk)) {
226 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
227 pdata->tsif_pclk);
228 rc = PTR_ERR(tsif_device->tsif_pclk);
229 tsif_device->tsif_pclk = NULL;
230 goto ret;
231 }
232 }
233 if (pdata->tsif_ref_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700234 tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
235 pdata->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 if (IS_ERR(tsif_device->tsif_ref_clk)) {
237 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
238 pdata->tsif_ref_clk);
239 rc = PTR_ERR(tsif_device->tsif_ref_clk);
240 tsif_device->tsif_ref_clk = NULL;
241 goto ret;
242 }
243 }
244 return 0;
245ret:
246 tsif_put_clocks(tsif_device);
247 return rc;
248}
249
250static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
251{
252 if (on) {
253 if (tsif_device->tsif_clk)
254 clk_enable(tsif_device->tsif_clk);
255 if (tsif_device->tsif_pclk)
256 clk_enable(tsif_device->tsif_pclk);
257 clk_enable(tsif_device->tsif_ref_clk);
258 } else {
259 if (tsif_device->tsif_clk)
260 clk_disable(tsif_device->tsif_clk);
261 if (tsif_device->tsif_pclk)
262 clk_disable(tsif_device->tsif_pclk);
263 clk_disable(tsif_device->tsif_ref_clk);
264 }
265}
266/* ===clocks end=== */
267/* ===gpio begin=== */
268
269static void tsif_gpios_free(const struct msm_gpio *table, int size)
270{
271 int i;
272 const struct msm_gpio *g;
273 for (i = size-1; i >= 0; i--) {
274 g = table + i;
275 gpio_free(GPIO_PIN(g->gpio_cfg));
276 }
277}
278
279static int tsif_gpios_request(const struct msm_gpio *table, int size)
280{
281 int rc;
282 int i;
283 const struct msm_gpio *g;
284 for (i = 0; i < size; i++) {
285 g = table + i;
286 rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
287 if (rc) {
288 pr_err("gpio_request(%d) <%s> failed: %d\n",
289 GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
290 goto err;
291 }
292 }
293 return 0;
294err:
295 tsif_gpios_free(table, i);
296 return rc;
297}
298
299static int tsif_gpios_disable(const struct msm_gpio *table, int size)
300{
301 int rc = 0;
302 int i;
303 const struct msm_gpio *g;
304 for (i = size-1; i >= 0; i--) {
305 int tmp;
306 g = table + i;
Joel Nider951b2832012-05-07 21:13:38 +0300307 tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
308 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
309 GPIO_CFG_DISABLE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700310 if (tmp) {
311 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
312 " <%s> failed: %d\n",
313 g->gpio_cfg, g->label ?: "?", rc);
314 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
315 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
316 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
317 GPIO_DRVSTR(g->gpio_cfg));
318 if (!rc)
319 rc = tmp;
320 }
321 }
322
323 return rc;
324}
325
326static int tsif_gpios_enable(const struct msm_gpio *table, int size)
327{
328 int rc;
329 int i;
330 const struct msm_gpio *g;
331 for (i = 0; i < size; i++) {
332 g = table + i;
333 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
334 if (rc) {
335 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
336 " <%s> failed: %d\n",
337 g->gpio_cfg, g->label ?: "?", rc);
338 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
339 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
340 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
341 GPIO_DRVSTR(g->gpio_cfg));
342 goto err;
343 }
344 }
345 return 0;
346err:
347 tsif_gpios_disable(table, i);
348 return rc;
349}
350
/*
 * tsif_gpios_request_enable - claim and configure all pins in one step
 *
 * Returns 0 on success; on failure nothing remains requested.
 */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	int rc;

	rc = tsif_gpios_request(table, size);
	if (rc)
		return rc;

	rc = tsif_gpios_enable(table, size);
	if (rc) {
		/* enable failed: give back the pins we requested */
		tsif_gpios_free(table, size);
		return rc;
	}

	return 0;
}
361
/* Park all pins as inputs, then release them. */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
	tsif_gpios_free(table, size);
}
367
368static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
369{
370 struct msm_tsif_platform_data *pdata =
371 tsif_device->pdev->dev.platform_data;
372 return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
373}
374
375static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
376{
377 struct msm_tsif_platform_data *pdata =
378 tsif_device->pdev->dev.platform_data;
379 tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
380}
381
382/* ===gpio end=== */
383
384static int tsif_start_hw(struct msm_tsif_device *tsif_device)
385{
386 u32 ctl = TSIF_STS_CTL_EN_IRQ |
387 TSIF_STS_CTL_EN_TIME_LIM |
388 TSIF_STS_CTL_EN_TCR |
389 TSIF_STS_CTL_EN_DM;
390 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
391 switch (tsif_device->mode) {
392 case 1: /* mode 1 */
393 ctl |= (0 << 5);
394 break;
395 case 2: /* mode 2 */
396 ctl |= (1 << 5);
397 break;
398 case 3: /* manual - control from debugfs */
399 return 0;
400 break;
401 default:
402 return -EINVAL;
403 }
404 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
405 iowrite32(tsif_device->time_limit,
406 tsif_device->base + TSIF_TIME_LIMIT_OFF);
407 wmb();
408 iowrite32(ctl | TSIF_STS_CTL_START,
409 tsif_device->base + TSIF_STS_CTL_OFF);
410 wmb();
411 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
412 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
413}
414
/* Request the TSIF core to stop by setting the STOP bit in STS_CTL. */
static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
{
	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb(); /* make sure the STOP write reaches the device before return */
}
420
421/* ===DMA begin=== */
422/**
423 * TSIF DMA theory of operation
424 *
425 * Circular memory buffer \a tsif_mem_buffer allocated;
426 * 4 pointers points to and moved forward on:
427 * - \a ri index of first ready to read packet.
428 * Updated by client's call to tsif_reclaim_packets()
429 * - \a wi points to the next packet to be written by DM.
430 * Data below is valid and will not be overriden by DMA.
431 * Moved on DM callback
432 * - \a dmwi points to the next packet not scheduled yet for DM
433 * moved when packet scheduled for DM
434 *
435 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
436 * at time immediately after scheduling.
437 *
438 * Initially, 2 packets get scheduled for the DM.
439 *
440 * Upon packet receive, DM writes packet to the pre-programmed
441 * location and invoke its callback.
442 *
443 * DM callback moves sets wi pointer to \a xfer->wi;
444 * then it schedules next packet for DM and moves \a dmwi pointer.
445 *
446 * Buffer overflow handling
447 *
448 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
449 * DMA re-scheduled to the same index.
450 * Callback check and not move \a wi to become equal to \a ri
451 *
452 * On \a read request, data between \a ri and \a wi pointers may be read;
453 * \ri pointer moved accordingly.
454 *
455 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
456 * \a wi is between [\a ri, \a dmwi]
457 *
458 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
459 *
460 * Number of scheduled packets for DM: (dmwi-wi)
461 */
462
463/**
464 * tsif_dma_schedule - schedule DMA transfers
465 *
466 * @tsif_device: device
467 *
468 * Executed from process context on init, or from tasklet when
469 * re-scheduling upon DMA completion.
470 * This prevent concurrent execution from several CPU's
471 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* next chunk lands at packet index dmwi0 of the ring */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* completion callback will publish this as the new wi */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
515
516/**
517 * tsif_dmov_complete_func - DataMover completion callback
518 *
519 * @cmd: original DM command
520 * @result: DM result
521 * @err: optional error buffer
522 *
523 * Executed in IRQ context (Data Mover's IRQ)
524 * DataMover's spinlock @msm_dmov_lock held.
525 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;
	/* byte offset of this chunk within the cyclic data buffer */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				  TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * Clocks _may_ be stopped right from IRQ
				 * context. This is far from optimal w.r.t
				 * latency.
				 *
				 * But, this branch taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happens for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 */
				tsif_clock(tsif_device, 0);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				/* both transfers drained: flush is complete */
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	/* publish the new write index and free this xfer slot */
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
655
656/**
657 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
658 *
659 * @data: tsif_device
660 *
661 * Reschedule DMA requests
662 *
663 * Executed in tasklet
664 */
665static void tsif_dma_refill(unsigned long data)
666{
667 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
668 if (tsif_device->state == tsif_state_running)
669 tsif_dma_schedule(tsif_device);
670}
671
672/**
673 * tsif_dma_flush - flush DMA channel
674 *
675 * @tsif_device:
676 *
677 * busy wait till DMA flushed
678 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	/* busy-wait (sleeping) until the DM completion callback has
	 * cleared both xfer slots; the callback also moves the state
	 * machine once the channel reports a flush.
	 */
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			msm_dmov_flush(tsif_device->dma, 1);
			usleep(10000);
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
693
/* Tear down all DMA state: stop refills, drain the channel, free memory.
 * Order matters: state change and tasklet_kill() prevent new work from
 * being scheduled before the flush and the frees below.
 */
static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
{
	int i;
	tsif_device->state = tsif_state_flushing;
	tasklet_kill(&tsif_device->dma_refill);
	tsif_dma_flush(tsif_device);
	for (i = 0; i < 2; i++) {
		if (tsif_device->dmov_cmd[i]) {
			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
					  tsif_device->dmov_cmd[i],
					  tsif_device->dmov_cmd_dma[i]);
			tsif_device->dmov_cmd[i] = NULL;
		}
	}
	if (tsif_device->data_buffer) {
		/* detach the debugfs blob before freeing its backing store */
		tsif_device->blob_wrapper_databuf.data = NULL;
		tsif_device->blob_wrapper_databuf.size = 0;
		dma_free_coherent(NULL, TSIF_BUF_SIZE,
				  tsif_device->data_buffer,
				  tsif_device->data_buffer_dma);
		tsif_device->data_buffer = NULL;
	}
}
717
/* Allocate the cyclic data buffer and the two Data Mover box commands,
 * and reset all ring indices.  Returns 0 or -ENOMEM (partial
 * allocations are undone via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	/* expose the buffer through the debugfs blob */
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	/* empty ring: all indices coincide */
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		/* box-mode, last command, flow-controlled by the TSIF CRCI */
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		/* source is the fixed TSIF data port register */
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* source does not advance; destination steps one packet/row */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	msm_dmov_flush(tsif_device->dma, 1);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
772
773/* ===DMA end=== */
774
775/* ===IRQ begin=== */
776
/* TSIF interrupt handler: counts events and acknowledges them by
 * writing the status word back (write-to-clear behavior is presumed
 * from the write-back of sts_ctl here — TODO confirm against the TSIF
 * hardware documentation).
 */
static irqreturn_t tsif_irq(int irq, void *dev_id)
{
	struct msm_tsif_device *tsif_device = dev_id;
	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
			 TSIF_STS_CTL_OVERFLOW |
			 TSIF_STS_CTL_LOST_SYNC |
			 TSIF_STS_CTL_TIMEOUT))) {
		/* none of the bits we service is set */
		dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
		return IRQ_NONE;
	}
	if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
		tsif_device->stat_rx++;
	}
	if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
		tsif_device->stat_overflow++;
	}
	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
		tsif_device->stat_lost_sync++;
	}
	if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
		tsif_device->stat_timeout++;
	}
	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb(); /* ack must reach the device before the IRQ is re-armed */
	return IRQ_HANDLED;
}
808
809/* ===IRQ end=== */
810
811/* ===Device attributes begin=== */
812
/* sysfs "stats" read: dump device configuration, counters and several
 * clock-controller registers for debugging.
 */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
878/**
879 * set_stats - reset statistics on write
880 *
881 * @dev:
882 * @attr:
883 * @buf:
884 * @count:
885 */
886static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
887 const char *buf, size_t count)
888{
889 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
890 tsif_device->stat_rx = 0;
891 tsif_device->stat_overflow = 0;
892 tsif_device->stat_lost_sync = 0;
893 tsif_device->stat_timeout = 0;
894 tsif_device->stat_dmov_err = 0;
895 tsif_device->stat_soft_drop = 0;
896 tsif_device->stat_ifi = 0;
897 return count;
898}
899static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
900
901static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
902 char *buf)
903{
904 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
905 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
906}
907
908static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
909 const char *buf, size_t count)
910{
911 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
912 int value;
913 int rc;
914 if (1 != sscanf(buf, "%d", &value)) {
915 dev_err(&tsif_device->pdev->dev,
916 "Failed to parse integer: <%s>\n", buf);
917 return -EINVAL;
918 }
919 rc = tsif_set_mode(tsif_device, value);
920 if (!rc)
921 rc = count;
922 return rc;
923}
924static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
925
926static ssize_t show_time_limit(struct device *dev,
927 struct device_attribute *attr,
928 char *buf)
929{
930 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
931 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
932}
933
934static ssize_t set_time_limit(struct device *dev,
935 struct device_attribute *attr,
936 const char *buf, size_t count)
937{
938 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
939 int value;
940 int rc;
941 if (1 != sscanf(buf, "%d", &value)) {
942 dev_err(&tsif_device->pdev->dev,
943 "Failed to parse integer: <%s>\n", buf);
944 return -EINVAL;
945 }
946 rc = tsif_set_time_limit(tsif_device, value);
947 if (!rc)
948 rc = count;
949 return rc;
950}
951static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
952 show_time_limit, set_time_limit);
953
954static ssize_t show_buf_config(struct device *dev,
955 struct device_attribute *attr,
956 char *buf)
957{
958 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
959 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
960 tsif_device->pkts_per_chunk,
961 tsif_device->chunks_per_buf);
962}
963
964static ssize_t set_buf_config(struct device *dev,
965 struct device_attribute *attr,
966 const char *buf, size_t count)
967{
968 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
969 u32 p, c;
970 int rc;
971 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
972 dev_err(&tsif_device->pdev->dev,
973 "Failed to parse integer: <%s>\n", buf);
974 return -EINVAL;
975 }
976 rc = tsif_set_buf_config(tsif_device, p, c);
977 if (!rc)
978 rc = count;
979 return rc;
980}
981static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
982 show_buf_config, set_buf_config);
983
/* sysfs attributes registered as a group on the platform device */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	NULL,	/* sentinel */
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
994/* ===Device attributes end=== */
995
996/* ===debugfs begin=== */
997
/* debugfs write helper: @data is an __iomem register address. */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb(); /* flush the register write before returning to userspace */
	return 0;
}
1004
/* debugfs read helper: @data is an __iomem register address. */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}
1010
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

/* Create one debugfs file exposing a 32-bit register at @value.
 * NOTE(review): not declared static — looks unintentional for a
 * driver-local helper; confirm no other translation unit uses it.
 */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}
1019
/* Bring the device from stopped to running: allocate DMA, configure
 * GPIOs, enable IRQ and clocks, pre-schedule DMA, then start the core.
 *
 * NOTE(review): if pm_runtime_get() fails, this returns without undoing
 * the hw start, GPIOs, DMA or clocks set up above — looks like a
 * resource/state leak on that path; confirm and add rollback.
 */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;
	int result;

	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	tsif_device->state = tsif_state_running;

	/* make sure the GPIO's are set up */
	rc = tsif_start_gpios(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
		tsif_dma_exit(tsif_device);
		return rc;
	}

	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		/* unwind in reverse order of setup */
		tsif_stop_gpios(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		return rc;
	}

	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}

	/* keep the system awake while capturing */
	wake_lock(&tsif_device->wake_lock);
	return rc;
}
1077
/*
 * action_close() - stop streaming and release everything taken in
 * action_open(), in reverse order.  GPIOs are shut off first and the
 * ADM is given time to drain before the core is stopped (see below).
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);

	/* turn off the GPIO's to prevent new data from entering */
	tsif_stop_gpios(tsif_device);

	/* we unfortunately must sleep here to give the ADM time to
	 * complete any outstanding reads after the GPIO's are turned
	 * off. There is no indication from the ADM hardware that
	 * there are any outstanding reads on the bus, and if we
	 * stop the TSIF too quickly, it can cause a bus error.
	 */
	msleep(100);

	/* now we can stop the core */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	/* drop the PM reference and wakelock taken in action_open() */
	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1104
1105
/* command table for the debugfs "action" file: maps the written
 * command string ("open"/"close") to its handler */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1113
1114static ssize_t tsif_debugfs_action_write(struct file *filp,
1115 const char __user *userbuf,
1116 size_t count, loff_t *f_pos)
1117{
1118 int i;
1119 struct msm_tsif_device *tsif_device = filp->private_data;
1120 char s[40];
1121 int len = min(sizeof(s) - 1, count);
1122 if (copy_from_user(s, userbuf, len))
1123 return -EFAULT;
1124 s[len] = '\0';
1125 dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
1126 for (i = 0; i < ARRAY_SIZE(actions); i++) {
1127 if (!strncmp(s, actions[i].name,
1128 min(count, strlen(actions[i].name)))) {
1129 int rc = actions[i].func(tsif_device);
1130 if (!rc)
1131 rc = count;
1132 return rc;
1133 }
1134 }
1135 return -EINVAL;
1136}
1137
/* generic debugfs open: stash the inode's private pointer (the
 * msm_tsif_device) in the file for the read/write handlers */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
1143
/* file operations for the write-only debugfs "action" file */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1148
1149static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1150 size_t count, loff_t *f_pos)
1151{
1152 static char bufa[200];
1153 static char *buf = bufa;
1154 int sz = sizeof(bufa);
1155 struct msm_tsif_device *tsif_device = filp->private_data;
1156 int len = 0;
1157 if (tsif_device) {
1158 int i;
1159 len += snprintf(buf + len, sz - len,
1160 "ri %3d | wi %3d | dmwi %3d |",
1161 tsif_device->ri, tsif_device->wi,
1162 tsif_device->dmwi);
1163 for (i = 0; i < 2; i++) {
1164 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1165 if (xfer->busy) {
1166 u32 dst =
1167 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1168 u32 base = tsif_device->data_buffer_dma;
1169 int w = (dst - base) / TSIF_PKT_SIZE;
1170 len += snprintf(buf + len, sz - len,
1171 " [%3d]{%3d}",
1172 w, xfer->wi);
1173 } else {
1174 len += snprintf(buf + len, sz - len,
1175 " ---idle---");
1176 }
1177 }
1178 len += snprintf(buf + len, sz - len, "\n");
1179 } else {
1180 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1181 }
1182 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1183}
1184
/* file operations for the read-only debugfs "dma" file */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1189
1190static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1191 size_t count, loff_t *f_pos)
1192{
1193 static char bufa[300];
1194 static char *buf = bufa;
1195 int sz = sizeof(bufa);
1196 struct msm_tsif_device *tsif_device = filp->private_data;
1197 int len = 0;
1198 if (tsif_device) {
1199 struct msm_tsif_platform_data *pdata =
1200 tsif_device->pdev->dev.platform_data;
1201 int i;
1202 for (i = 0; i < pdata->num_gpios; i++) {
1203 if (pdata->gpios[i].gpio_cfg) {
1204 int x = !!gpio_get_value(GPIO_PIN(
1205 pdata->gpios[i].gpio_cfg));
1206 len += snprintf(buf + len, sz - len,
1207 "%15s: %d\n",
1208 pdata->gpios[i].label, x);
1209 }
1210 }
1211 } else {
1212 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1213 }
1214 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1215}
1216
/* file operations for the read-only debugfs "gpios" file */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1221
1222
/*
 * Create the per-device debugfs tree: one x32 file per TSIF register
 * (driven by the debugfs_tsif_regs[] table defined earlier in the file)
 * plus "gpios", "action", "dma" and the raw "data_buf" blob.
 * debugfs failure is non-fatal: if the directory cannot be created the
 * device simply has no debugfs presence.
 */
static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
{
	tsif_device->dent_tsif = debugfs_create_dir(
		dev_name(&tsif_device->pdev->dev), NULL);
	if (tsif_device->dent_tsif) {
		int i;
		void __iomem *base = tsif_device->base;
		/* one register-view file per entry in the table */
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
			tsif_device->debugfs_tsif_regs[i] =
				debugfs_create_iomem_x32(
					debugfs_tsif_regs[i].name,
					debugfs_tsif_regs[i].mode,
					tsif_device->dent_tsif,
					base + debugfs_tsif_regs[i].offset);
		}
		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
		tsif_device->debugfs_action = debugfs_create_file("action",
			S_IWUSR,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
		tsif_device->debugfs_dma = debugfs_create_file("dma",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
			S_IRUGO,
			tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
	}
}
1252
1253static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1254{
1255 if (tsif_device->dent_tsif) {
1256 int i;
1257 debugfs_remove_recursive(tsif_device->dent_tsif);
1258 tsif_device->dent_tsif = NULL;
1259 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1260 tsif_device->debugfs_tsif_regs[i] = NULL;
1261 tsif_device->debugfs_gpio = NULL;
1262 tsif_device->debugfs_action = NULL;
1263 tsif_device->debugfs_dma = NULL;
1264 tsif_device->debugfs_databuf = NULL;
1265 }
1266}
1267/* ===debugfs end=== */
1268
/* ===module begin=== */
/* all probed TSIF devices, linked through msm_tsif_device.devlist */
static LIST_HEAD(tsif_devices);
1271
1272static struct msm_tsif_device *tsif_find_by_id(int id)
1273{
1274 struct msm_tsif_device *tsif_device;
1275 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1276 if (tsif_device->pdev->id == id)
1277 return tsif_device;
1278 }
1279 return NULL;
1280}
1281
1282static int __devinit msm_tsif_probe(struct platform_device *pdev)
1283{
1284 int rc = -ENODEV;
1285 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1286 struct msm_tsif_device *tsif_device;
1287 struct resource *res;
1288 /* check device validity */
1289 /* must have platform data */
1290 if (!plat) {
1291 dev_err(&pdev->dev, "Platform data not available\n");
1292 rc = -EINVAL;
1293 goto out;
1294 }
Joel Nider5578bdb2011-08-12 09:37:11 +03001295
1296 if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001297 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1298 rc = -EINVAL;
1299 goto out;
1300 }
1301 /* OK, we will use this device */
1302 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1303 if (!tsif_device) {
1304 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1305 rc = -ENOMEM;
1306 goto out;
1307 }
1308 /* cross links */
1309 tsif_device->pdev = pdev;
1310 platform_set_drvdata(pdev, tsif_device);
1311 tsif_device->mode = 1;
1312 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1313 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1314 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1315 (unsigned long)tsif_device);
1316 if (tsif_get_clocks(tsif_device))
1317 goto err_clocks;
1318/* map I/O memory */
1319 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1320 if (!tsif_device->memres) {
1321 dev_err(&pdev->dev, "Missing MEM resource\n");
1322 rc = -ENXIO;
1323 goto err_rgn;
1324 }
1325 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1326 if (!res) {
1327 dev_err(&pdev->dev, "Missing DMA resource\n");
1328 rc = -ENXIO;
1329 goto err_rgn;
1330 }
1331 tsif_device->dma = res->start;
1332 tsif_device->crci = res->end;
1333 tsif_device->base = ioremap(tsif_device->memres->start,
1334 resource_size(tsif_device->memres));
1335 if (!tsif_device->base) {
1336 dev_err(&pdev->dev, "ioremap failed\n");
1337 goto err_ioremap;
1338 }
1339 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1340 tsif_device->memres->start, tsif_device->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001341
1342 pm_runtime_set_active(&pdev->dev);
1343 pm_runtime_enable(&pdev->dev);
1344
1345 tsif_debugfs_init(tsif_device);
1346 rc = platform_get_irq(pdev, 0);
1347 if (rc > 0) {
1348 tsif_device->irq = rc;
1349 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1350 dev_name(&pdev->dev), tsif_device);
1351 disable_irq(tsif_device->irq);
1352 }
1353 if (rc) {
1354 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1355 tsif_device->irq, rc);
1356 goto err_irq;
1357 }
1358 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1359 if (rc) {
1360 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1361 goto err_attrs;
1362 }
1363 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1364 dev_name(&pdev->dev));
1365 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1366 tsif_device->irq, tsif_device->memres->start,
1367 tsif_device->dma, tsif_device->crci);
1368 list_add(&tsif_device->devlist, &tsif_devices);
1369 return 0;
1370/* error path */
1371 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1372err_attrs:
1373 free_irq(tsif_device->irq, tsif_device);
1374err_irq:
1375 tsif_debugfs_exit(tsif_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001376 iounmap(tsif_device->base);
1377err_ioremap:
1378err_rgn:
1379 tsif_put_clocks(tsif_device);
1380err_clocks:
1381 kfree(tsif_device);
1382out:
1383 return rc;
1384}
1385
/*
 * msm_tsif_remove() - platform remove: undo everything done in probe, in
 * roughly reverse order, then free the device structure.
 * NOTE(review): tsif_dma_exit()/tsif_stop_gpios() are called even though
 * the device is presumably already closed — confirm whether remove on a
 * still-running device is expected to work.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1405
/* Runtime-PM callbacks: no hardware work is needed on suspend/resume
 * transitions, so these only log for debugging. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1422
1423
1424static struct platform_driver msm_tsif_driver = {
1425 .probe = msm_tsif_probe,
1426 .remove = __exit_p(msm_tsif_remove),
1427 .driver = {
1428 .name = "msm_tsif",
1429 .pm = &tsif_dev_pm_ops,
1430 },
1431};
1432
/* Module entry point: register the platform driver. */
static int __init mod_init(void)
{
	int rc = platform_driver_register(&msm_tsif_driver);
	if (rc)
		pr_err("TSIF: platform_driver_register failed: %d\n", rc);
	return rc;
}

/* Module exit point: unregister the platform driver. */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
/* ===module end=== */
1446
1447/* public API */
1448
Joel Nider5578bdb2011-08-12 09:37:11 +03001449int tsif_get_active(void)
1450{
1451 struct msm_tsif_device *tsif_device;
1452 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1453 return tsif_device->pdev->id;
1454 }
1455 return -ENODEV;
1456}
1457EXPORT_SYMBOL(tsif_get_active);
1458
/*
 * tsif_attach() - bind a client to TSIF device @id.
 *
 * Only one client may be attached at a time; a second attach fails with
 * -EBUSY.  Takes a device reference so the driver cannot be unloaded
 * while a client is attached (released in tsif_detach()).
 * Returns a cookie for the other tsif_* calls, or an ERR_PTR().
 *
 * NOTE(review): the notify/data check-then-set below is not locked, so
 * two concurrent attaches could race — confirm callers serialize this.
 */
void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
{
	struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
	if (!tsif_device)
		return ERR_PTR(-ENODEV);
	if (tsif_device->client_notify || tsif_device->client_data)
		return ERR_PTR(-EBUSY);
	tsif_device->client_notify = notify;
	tsif_device->client_data = data;
	/* prevent from unloading */
	get_device(&tsif_device->pdev->dev);
	return tsif_device;
}
EXPORT_SYMBOL(tsif_attach);
1473
/* tsif_detach() - unbind the client and drop the device reference taken
 * in tsif_attach().  @cookie is the value tsif_attach() returned. */
void tsif_detach(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->client_notify = NULL;
	tsif_device->client_data = NULL;
	put_device(&tsif_device->pdev->dev);
}
EXPORT_SYMBOL(tsif_detach);
1482
/* tsif_get_info() - report the data buffer address and its size in
 * packets to the client.  Either output pointer may be NULL. */
void tsif_get_info(void *cookie, void **pdata, int *psize)
{
	struct msm_tsif_device *tsif_device = cookie;
	if (pdata)
		*pdata = tsif_device->data_buffer;
	if (psize)
		*psize = TSIF_PKTS_IN_BUF;
}
EXPORT_SYMBOL(tsif_get_info);
1492
1493int tsif_set_mode(void *cookie, int mode)
1494{
1495 struct msm_tsif_device *tsif_device = cookie;
1496 if (tsif_device->state != tsif_state_stopped) {
1497 dev_err(&tsif_device->pdev->dev,
1498 "Can't change mode while device is active\n");
1499 return -EBUSY;
1500 }
1501 switch (mode) {
1502 case 1:
1503 case 2:
1504 case 3:
1505 tsif_device->mode = mode;
1506 break;
1507 default:
1508 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1509 return -EINVAL;
1510 }
1511 return 0;
1512}
1513EXPORT_SYMBOL(tsif_set_mode);
1514
1515int tsif_set_time_limit(void *cookie, u32 value)
1516{
1517 struct msm_tsif_device *tsif_device = cookie;
1518 if (tsif_device->state != tsif_state_stopped) {
1519 dev_err(&tsif_device->pdev->dev,
1520 "Can't change time limit while device is active\n");
1521 return -EBUSY;
1522 }
1523 if (value != (value & 0xFFFFFF)) {
1524 dev_err(&tsif_device->pdev->dev,
1525 "Invalid time limit (should be 24 bit): %#x\n", value);
1526 return -EINVAL;
1527 }
1528 tsif_device->time_limit = value;
1529 return 0;
1530}
1531EXPORT_SYMBOL(tsif_set_time_limit);
1532
1533int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1534{
1535 struct msm_tsif_device *tsif_device = cookie;
1536 if (tsif_device->data_buffer) {
1537 dev_err(&tsif_device->pdev->dev,
1538 "Data buffer already allocated: %p\n",
1539 tsif_device->data_buffer);
1540 return -EBUSY;
1541 }
1542 /* check for crazy user */
1543 if (pkts_in_chunk * chunks_in_buf > 10240) {
1544 dev_err(&tsif_device->pdev->dev,
1545 "Buffer requested is too large: %d * %d\n",
1546 pkts_in_chunk,
1547 chunks_in_buf);
1548 return -EINVAL;
1549 }
1550 /* parameters are OK, execute */
1551 tsif_device->pkts_per_chunk = pkts_in_chunk;
1552 tsif_device->chunks_per_buf = chunks_in_buf;
1553 return 0;
1554}
1555EXPORT_SYMBOL(tsif_set_buf_config);
1556
/* tsif_get_state() - report the read index, write index and device
 * state to the client.  Any output pointer may be NULL. */
void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
{
	struct msm_tsif_device *tsif_device = cookie;
	if (ri)
		*ri = tsif_device->ri;
	if (wi)
		*wi = tsif_device->wi;
	if (state)
		*state = tsif_device->state;
}
EXPORT_SYMBOL(tsif_get_state);
1568
/* tsif_start() - client API wrapper: start streaming (see action_open()). */
int tsif_start(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	return action_open(tsif_device);
}
EXPORT_SYMBOL(tsif_start);
1575
/* tsif_stop() - client API wrapper: stop streaming (see action_close()). */
void tsif_stop(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	action_close(tsif_device);
}
EXPORT_SYMBOL(tsif_stop);
1582
/* tsif_reclaim_packets() - client advances the read index after
 * consuming packets, freeing that part of the ring for new DMA data. */
void tsif_reclaim_packets(void *cookie, int read_index)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->ri = read_index;
}
EXPORT_SYMBOL(tsif_reclaim_packets);
1589
/* module registration and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1596