blob: 42faa078aff0eb7c76d3cc670ce1b0ac846be882 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * TSIF Driver
3 *
4 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
34
35#include <mach/gpio.h>
36#include <mach/dma.h>
37#include <mach/msm_tsif.h>
38
39/*
40 * TSIF register offsets
41 */
42#define TSIF_STS_CTL_OFF (0x0)
43#define TSIF_TIME_LIMIT_OFF (0x4)
44#define TSIF_CLK_REF_OFF (0x8)
45#define TSIF_LPBK_FLAGS_OFF (0xc)
46#define TSIF_LPBK_DATA_OFF (0x10)
47#define TSIF_TEST_CTL_OFF (0x14)
48#define TSIF_TEST_MODE_OFF (0x18)
49#define TSIF_TEST_RESET_OFF (0x1c)
50#define TSIF_TEST_EXPORT_OFF (0x20)
51#define TSIF_TEST_CURRENT_OFF (0x24)
52
53#define TSIF_DATA_PORT_OFF (0x100)
54
55/* bits for TSIF_STS_CTL register */
56#define TSIF_STS_CTL_EN_IRQ (1 << 28)
57#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
58#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
59#define TSIF_STS_CTL_OVERFLOW (1 << 25)
60#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
61#define TSIF_STS_CTL_TIMEOUT (1 << 23)
62#define TSIF_STS_CTL_INV_SYNC (1 << 21)
63#define TSIF_STS_CTL_INV_NULL (1 << 20)
64#define TSIF_STS_CTL_INV_ERROR (1 << 19)
65#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
66#define TSIF_STS_CTL_INV_DATA (1 << 17)
67#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
68#define TSIF_STS_CTL_SPARE (1 << 15)
69#define TSIF_STS_CTL_EN_NULL (1 << 11)
70#define TSIF_STS_CTL_EN_ERROR (1 << 10)
71#define TSIF_STS_CTL_LAST_BIT (1 << 9)
72#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
73#define TSIF_STS_CTL_EN_TCR (1 << 7)
74#define TSIF_STS_CTL_TEST_MODE (3 << 5)
75#define TSIF_STS_CTL_EN_DM (1 << 4)
76#define TSIF_STS_CTL_STOP (1 << 3)
77#define TSIF_STS_CTL_START (1 << 0)
78
79/*
80 * Data buffering parameters
81 *
82 * Data stored in cyclic buffer;
83 *
84 * Data organized in chunks of packets.
85 * One chunk processed at a time by the data mover
86 *
87 */
88#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
89#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
90#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
91#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
92#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
93#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
Joel Nider5578bdb2011-08-12 09:37:11 +030094#define TSIF_MAX_ID 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
96#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
97#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
98#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
99#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
100#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
101
/* used to create debugfs entries */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* file permission bits */
	int offset;		/* TSIF register offset exposed by the file */
} debugfs_tsif_regs[] = {
	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},
	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
};
120
/* structures for Data Mover */

/*
 * One DM command: a single box transfer plus the command-pointer entry
 * that references it; allocated together in DMA-coherent memory
 * (see tsif_dma_init()).
 */
struct tsif_dmov_cmd {
	dmov_box box;
	dma_addr_t box_ptr;
};

struct msm_tsif_device;

/* Per-transfer context handed to the Data Mover. */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;	/* DM header; completion callback lives here */
	struct msm_tsif_device *tsif_device;	/* back-pointer to owning device */
	int busy;	/* non-zero while this xfer is queued on the DM */
	int wi; /**< set devices's write index after xfer */
};

struct msm_tsif_device {
	struct list_head devlist;	/* NOTE(review): presumably links a driver-global device list - defined outside this chunk */
	struct platform_device *pdev;
	struct resource *memres;	/* MMIO resource of the TSIF block */
	void __iomem *base;	/* mapped TSIF registers */
	unsigned int irq;
	int mode;	/* 1, 2 (hardware modes) or 3 (manual via debugfs) */
	u32 time_limit;	/* written to TSIF_TIME_LIMIT register on start */
	enum tsif_state state;
	struct wake_lock wake_lock;	/* held between action_open()/action_close() */
	/* clocks (each optional; NULL when not named in platform data) */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf;
	/* DMA related */
	int dma;	/* DM channel id (passed to msm_dmov_enqueue_cmd) */
	int crci;	/* CRCI flow-control id for the TSIF source */
	void *data_buffer;	/* circular packet buffer, DMA-coherent */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;
	u32 chunks_per_buf;
	int ri;	/* read index: first packet ready for the client */
	int wi;	/* write index: data below is valid for reading */
	int dmwi; /**< DataMover write index */
	struct tsif_dmov_cmd *dmov_cmd[2];
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* reschedules DMA outside DM's lock */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* last packet-status words feeding the IFI calc */
	/* client */
	void *client_data;	/* opaque cookie passed to client_notify */
	void (*client_notify)(void *client_data);
};
185
186/* ===clocks begin=== */
187
188static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
189{
190 if (tsif_device->tsif_clk) {
191 clk_put(tsif_device->tsif_clk);
192 tsif_device->tsif_clk = NULL;
193 }
194 if (tsif_device->tsif_pclk) {
195 clk_put(tsif_device->tsif_pclk);
196 tsif_device->tsif_pclk = NULL;
197 }
198
199 if (tsif_device->tsif_ref_clk) {
200 clk_put(tsif_device->tsif_ref_clk);
201 tsif_device->tsif_ref_clk = NULL;
202 }
203}
204
205static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
206{
207 struct msm_tsif_platform_data *pdata =
208 tsif_device->pdev->dev.platform_data;
209 int rc = 0;
210
211 if (pdata->tsif_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700212 tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
213 pdata->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214 if (IS_ERR(tsif_device->tsif_clk)) {
215 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
216 pdata->tsif_clk);
217 rc = PTR_ERR(tsif_device->tsif_clk);
218 tsif_device->tsif_clk = NULL;
219 goto ret;
220 }
221 }
222 if (pdata->tsif_pclk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700223 tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
224 pdata->tsif_pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225 if (IS_ERR(tsif_device->tsif_pclk)) {
226 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
227 pdata->tsif_pclk);
228 rc = PTR_ERR(tsif_device->tsif_pclk);
229 tsif_device->tsif_pclk = NULL;
230 goto ret;
231 }
232 }
233 if (pdata->tsif_ref_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700234 tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
235 pdata->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 if (IS_ERR(tsif_device->tsif_ref_clk)) {
237 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
238 pdata->tsif_ref_clk);
239 rc = PTR_ERR(tsif_device->tsif_ref_clk);
240 tsif_device->tsif_ref_clk = NULL;
241 goto ret;
242 }
243 }
244 return 0;
245ret:
246 tsif_put_clocks(tsif_device);
247 return rc;
248}
249
250static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
251{
252 if (on) {
253 if (tsif_device->tsif_clk)
254 clk_enable(tsif_device->tsif_clk);
255 if (tsif_device->tsif_pclk)
256 clk_enable(tsif_device->tsif_pclk);
257 clk_enable(tsif_device->tsif_ref_clk);
258 } else {
259 if (tsif_device->tsif_clk)
260 clk_disable(tsif_device->tsif_clk);
261 if (tsif_device->tsif_pclk)
262 clk_disable(tsif_device->tsif_pclk);
263 clk_disable(tsif_device->tsif_ref_clk);
264 }
265}
266/* ===clocks end=== */
267/* ===gpio begin=== */
268
269static void tsif_gpios_free(const struct msm_gpio *table, int size)
270{
271 int i;
272 const struct msm_gpio *g;
273 for (i = size-1; i >= 0; i--) {
274 g = table + i;
275 gpio_free(GPIO_PIN(g->gpio_cfg));
276 }
277}
278
279static int tsif_gpios_request(const struct msm_gpio *table, int size)
280{
281 int rc;
282 int i;
283 const struct msm_gpio *g;
284 for (i = 0; i < size; i++) {
285 g = table + i;
286 rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
287 if (rc) {
288 pr_err("gpio_request(%d) <%s> failed: %d\n",
289 GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
290 goto err;
291 }
292 }
293 return 0;
294err:
295 tsif_gpios_free(table, i);
296 return rc;
297}
298
299static int tsif_gpios_disable(const struct msm_gpio *table, int size)
300{
301 int rc = 0;
302 int i;
303 const struct msm_gpio *g;
304 for (i = size-1; i >= 0; i--) {
305 int tmp;
306 g = table + i;
307 tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
308 if (tmp) {
309 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
310 " <%s> failed: %d\n",
311 g->gpio_cfg, g->label ?: "?", rc);
312 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
313 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
314 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
315 GPIO_DRVSTR(g->gpio_cfg));
316 if (!rc)
317 rc = tmp;
318 }
319 }
320
321 return rc;
322}
323
324static int tsif_gpios_enable(const struct msm_gpio *table, int size)
325{
326 int rc;
327 int i;
328 const struct msm_gpio *g;
329 for (i = 0; i < size; i++) {
330 g = table + i;
331 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
332 if (rc) {
333 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
334 " <%s> failed: %d\n",
335 g->gpio_cfg, g->label ?: "?", rc);
336 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
337 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
338 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
339 GPIO_DRVSTR(g->gpio_cfg));
340 goto err;
341 }
342 }
343 return 0;
344err:
345 tsif_gpios_disable(table, i);
346 return rc;
347}
348
/*
 * Request all GPIOs in @table, then switch them to active config;
 * if enabling fails the requested pins are freed again.
 */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	int rc = tsif_gpios_request(table, size);

	if (rc)
		return rc;

	rc = tsif_gpios_enable(table, size);
	if (rc)
		tsif_gpios_free(table, size);

	return rc;
}
359
/* Return all GPIOs in @table to inactive config, then release them. */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
	tsif_gpios_free(table, size);
}
365
366static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
367{
368 struct msm_tsif_platform_data *pdata =
369 tsif_device->pdev->dev.platform_data;
370 return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
371}
372
373static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
374{
375 struct msm_tsif_platform_data *pdata =
376 tsif_device->pdev->dev.platform_data;
377 tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
378}
379
380/* ===gpio end=== */
381
382static int tsif_start_hw(struct msm_tsif_device *tsif_device)
383{
384 u32 ctl = TSIF_STS_CTL_EN_IRQ |
385 TSIF_STS_CTL_EN_TIME_LIM |
386 TSIF_STS_CTL_EN_TCR |
387 TSIF_STS_CTL_EN_DM;
388 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
389 switch (tsif_device->mode) {
390 case 1: /* mode 1 */
391 ctl |= (0 << 5);
392 break;
393 case 2: /* mode 2 */
394 ctl |= (1 << 5);
395 break;
396 case 3: /* manual - control from debugfs */
397 return 0;
398 break;
399 default:
400 return -EINVAL;
401 }
402 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
403 iowrite32(tsif_device->time_limit,
404 tsif_device->base + TSIF_TIME_LIMIT_OFF);
405 wmb();
406 iowrite32(ctl | TSIF_STS_CTL_START,
407 tsif_device->base + TSIF_STS_CTL_OFF);
408 wmb();
409 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
410 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
411}
412
413static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
414{
415 iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
416 wmb();
417}
418
419/* ===DMA begin=== */
420/**
421 * TSIF DMA theory of operation
422 *
423 * Circular memory buffer \a tsif_mem_buffer allocated;
424 * 4 pointers points to and moved forward on:
425 * - \a ri index of first ready to read packet.
426 * Updated by client's call to tsif_reclaim_packets()
427 * - \a wi points to the next packet to be written by DM.
 *   Data below is valid and will not be overwritten by DMA.
429 * Moved on DM callback
430 * - \a dmwi points to the next packet not scheduled yet for DM
431 * moved when packet scheduled for DM
432 *
433 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
434 * at time immediately after scheduling.
435 *
436 * Initially, 2 packets get scheduled for the DM.
437 *
438 * Upon packet receive, DM writes packet to the pre-programmed
439 * location and invoke its callback.
440 *
 * DM callback sets the \a wi pointer to \a xfer->wi;
442 * then it schedules next packet for DM and moves \a dmwi pointer.
443 *
444 * Buffer overflow handling
445 *
446 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
447 * DMA re-scheduled to the same index.
 * The callback checks for this and does not move \a wi to become equal to \a ri
449 *
450 * On \a read request, data between \a ri and \a wi pointers may be read;
 * \a ri pointer moved accordingly.
452 *
453 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
454 * \a wi is between [\a ri, \a dmwi]
455 *
456 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
457 *
458 * Number of scheduled packets for DM: (dmwi-wi)
459 */
460
/**
 * tsif_dma_schedule - schedule DMA transfers
 *
 * @tsif_device: device
 *
 * Queues one chunk-sized box transfer on each idle xfer slot and
 * advances the \a dmwi index, unless advancing would collide with
 * \a ri (buffer full - the chunk is still received, but will be
 * overwritten; see tsif_dmov_complete_func()).
 *
 * Executed from process context on init, or from tasklet when
 * re-scheduling upon DMA completion.
 * This prevents concurrent execution from several CPUs.
 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* point this slot's box at the chunk starting at dmwi0 */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* remember where wi should land when this xfer completes */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
513
/**
 * tsif_dmov_complete_func - DataMover completion callback
 *
 * @cmd: original DM command
 * @result: DM result
 * @err: optional error buffer
 *
 * On success, accounts the received chunk and detects "soft drop"
 * (overwrite of unread data). On error/flush, invalidates the chunk's
 * status words and drives the device state machine towards stopped.
 * In all cases publishes the new write index, notifies the client and,
 * while running, arms the refill tasklet.
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;
	/* byte offset of this chunk within the circular data buffer */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * Clocks _may_ be stopped right from IRQ
				 * context. This is far from optimal w.r.t
				 * latency.
				 *
				 * But, this branch taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happens for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 */
				tsif_clock(tsif_device, 0);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	/* publish the new write index and release this xfer slot */
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA here -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
653
654/**
655 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
656 *
657 * @data: tsif_device
658 *
659 * Reschedule DMA requests
660 *
661 * Executed in tasklet
662 */
663static void tsif_dma_refill(unsigned long data)
664{
665 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
666 if (tsif_device->state == tsif_state_running)
667 tsif_dma_schedule(tsif_device);
668}
669
670/**
671 * tsif_dma_flush - flush DMA channel
672 *
673 * @tsif_device:
674 *
675 * busy wait till DMA flushed
676 */
677static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
678{
679 if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
680 tsif_device->state = tsif_state_flushing;
681 while (tsif_device->xfer[0].busy ||
682 tsif_device->xfer[1].busy) {
683 msm_dmov_flush(tsif_device->dma);
684 msleep(10);
685 }
686 }
687 tsif_device->state = tsif_state_stopped;
688 if (tsif_device->client_notify)
689 tsif_device->client_notify(tsif_device->client_data);
690}
691
692static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
693{
694 int i;
695 tsif_device->state = tsif_state_flushing;
696 tasklet_kill(&tsif_device->dma_refill);
697 tsif_dma_flush(tsif_device);
698 for (i = 0; i < 2; i++) {
699 if (tsif_device->dmov_cmd[i]) {
700 dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
701 tsif_device->dmov_cmd[i],
702 tsif_device->dmov_cmd_dma[i]);
703 tsif_device->dmov_cmd[i] = NULL;
704 }
705 }
706 if (tsif_device->data_buffer) {
707 tsif_device->blob_wrapper_databuf.data = NULL;
708 tsif_device->blob_wrapper_databuf.size = 0;
709 dma_free_coherent(NULL, TSIF_BUF_SIZE,
710 tsif_device->data_buffer,
711 tsif_device->data_buffer_dma);
712 tsif_device->data_buffer = NULL;
713 }
714}
715
/*
 * tsif_dma_init - allocate DMA resources and pre-program box commands
 *
 * Allocates the circular data buffer and the two Data Mover command
 * descriptors in coherent memory, resets the ri/wi/dmwi indices, and
 * programs each xfer's box command to move one chunk of packets from
 * the TSIF data port into the buffer.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure (everything
 * allocated so far is released via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	/* expose the buffer through the debugfs blob wrapper */
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		/* flow-controlled box transfer sourced from the TSIF CRCI */
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* source does not advance (FIFO port); dst steps per packet */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
					    offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	msm_dmov_flush(tsif_device->dma);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
770
771/* ===DMA end=== */
772
773/* ===IRQ begin=== */
774
775static irqreturn_t tsif_irq(int irq, void *dev_id)
776{
777 struct msm_tsif_device *tsif_device = dev_id;
778 u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
779 if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
780 TSIF_STS_CTL_OVERFLOW |
781 TSIF_STS_CTL_LOST_SYNC |
782 TSIF_STS_CTL_TIMEOUT))) {
783 dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
784 return IRQ_NONE;
785 }
786 if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
787 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
788 tsif_device->stat_rx++;
789 }
790 if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
791 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
792 tsif_device->stat_overflow++;
793 }
794 if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
795 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
796 tsif_device->stat_lost_sync++;
797 }
798 if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
799 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
800 tsif_device->stat_timeout++;
801 }
802 iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
803 wmb();
804 return IRQ_HANDLED;
805}
806
807/* ===IRQ end=== */
808
809/* ===Device attributes begin=== */
810
/*
 * show_stats - sysfs read handler for the "stats" attribute
 *
 * Prints configuration, current state, receive/error counters, the raw
 * inputs of the IFI computation (stat1/stat0; see the CONFIG_TSIF_DEBUG
 * branch in tsif_dmov_complete_func()), and a dump of several global
 * clock-control registers for debugging.
 */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
876/**
877 * set_stats - reset statistics on write
878 *
879 * @dev:
880 * @attr:
881 * @buf:
882 * @count:
883 */
884static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
885 const char *buf, size_t count)
886{
887 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
888 tsif_device->stat_rx = 0;
889 tsif_device->stat_overflow = 0;
890 tsif_device->stat_lost_sync = 0;
891 tsif_device->stat_timeout = 0;
892 tsif_device->stat_dmov_err = 0;
893 tsif_device->stat_soft_drop = 0;
894 tsif_device->stat_ifi = 0;
895 return count;
896}
897static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
898
899static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
900 char *buf)
901{
902 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
903 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
904}
905
906static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
907 const char *buf, size_t count)
908{
909 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
910 int value;
911 int rc;
912 if (1 != sscanf(buf, "%d", &value)) {
913 dev_err(&tsif_device->pdev->dev,
914 "Failed to parse integer: <%s>\n", buf);
915 return -EINVAL;
916 }
917 rc = tsif_set_mode(tsif_device, value);
918 if (!rc)
919 rc = count;
920 return rc;
921}
922static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
923
924static ssize_t show_time_limit(struct device *dev,
925 struct device_attribute *attr,
926 char *buf)
927{
928 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
929 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
930}
931
932static ssize_t set_time_limit(struct device *dev,
933 struct device_attribute *attr,
934 const char *buf, size_t count)
935{
936 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
937 int value;
938 int rc;
939 if (1 != sscanf(buf, "%d", &value)) {
940 dev_err(&tsif_device->pdev->dev,
941 "Failed to parse integer: <%s>\n", buf);
942 return -EINVAL;
943 }
944 rc = tsif_set_time_limit(tsif_device, value);
945 if (!rc)
946 rc = count;
947 return rc;
948}
949static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
950 show_time_limit, set_time_limit);
951
952static ssize_t show_buf_config(struct device *dev,
953 struct device_attribute *attr,
954 char *buf)
955{
956 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
957 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
958 tsif_device->pkts_per_chunk,
959 tsif_device->chunks_per_buf);
960}
961
962static ssize_t set_buf_config(struct device *dev,
963 struct device_attribute *attr,
964 const char *buf, size_t count)
965{
966 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
967 u32 p, c;
968 int rc;
969 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
970 dev_err(&tsif_device->pdev->dev,
971 "Failed to parse integer: <%s>\n", buf);
972 return -EINVAL;
973 }
974 rc = tsif_set_buf_config(tsif_device, p, c);
975 if (!rc)
976 rc = count;
977 return rc;
978}
979static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
980 show_buf_config, set_buf_config);
981
/* sysfs attributes published on the platform device */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	NULL,	/* sentinel required by the attribute-group API */
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
992/* ===Device attributes end=== */
993
994/* ===debugfs begin=== */
995
996static int debugfs_iomem_x32_set(void *data, u64 val)
997{
998 iowrite32(val, data);
999 wmb();
1000 return 0;
1001}
1002
1003static int debugfs_iomem_x32_get(void *data, u64 *val)
1004{
1005 *val = ioread32(data);
1006 return 0;
1007}
1008
1009DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1010 debugfs_iomem_x32_set, "0x%08llx\n");
1011
1012struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
1013 struct dentry *parent, u32 *value)
1014{
1015 return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
1016}
1017
/*
 * action_open - debugfs "open" action: bring the TSIF device up
 *
 * Allocates DMA resources, schedules the initial transfers, enables the
 * IRQ and clocks, runs the optional board init hook, starts the TSIF
 * hardware, then takes runtime-PM and wakelock references.
 *
 * Return: 0 on success; -EAGAIN if the device is not currently stopped;
 * negative error from DMA init, HW start, or runtime PM otherwise.
 */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;
	int result;

	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	tsif_device->state = tsif_state_running;
	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		return rc;
	}

	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		/*
		 * NOTE(review): on this path the HW, DMA, clocks and IRQ are
		 * left running (unlike the tsif_start_hw() failure path) -
		 * confirm whether teardown is intended here.
		 */
		return result;
	}

	wake_lock(&tsif_device->wake_lock);
	return rc;
}
1065
/*
 * action_close() - stop TSIF streaming and release run-time resources.
 *
 * Counterpart of action_open(): stops HW and DMA, gates the clock,
 * disables the IRQ, drops the runtime-PM reference and the wake lock.
 * Always returns 0.
 *
 * NOTE(review): the comment below says DMA must be flushed/stopped
 * before the TSIF hardware stop, yet tsif_stop_hw() runs first —
 * confirm which ordering the Data Mover actually requires.
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);
	/*
	 * DMA should be flushed/stopped prior to TSIF hardware stop,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1083
1084
/* command table for the debugfs "action" file: the written string is
 * matched against .name and the corresponding handler is invoked */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1092
1093static ssize_t tsif_debugfs_action_write(struct file *filp,
1094 const char __user *userbuf,
1095 size_t count, loff_t *f_pos)
1096{
1097 int i;
1098 struct msm_tsif_device *tsif_device = filp->private_data;
1099 char s[40];
1100 int len = min(sizeof(s) - 1, count);
1101 if (copy_from_user(s, userbuf, len))
1102 return -EFAULT;
1103 s[len] = '\0';
1104 dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
1105 for (i = 0; i < ARRAY_SIZE(actions); i++) {
1106 if (!strncmp(s, actions[i].name,
1107 min(count, strlen(actions[i].name)))) {
1108 int rc = actions[i].func(tsif_device);
1109 if (!rc)
1110 rc = count;
1111 return rc;
1112 }
1113 }
1114 return -EINVAL;
1115}
1116
/* common debugfs open: stash the inode's private pointer (the
 * msm_tsif_device supplied at file creation) for the read/write handlers */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
1122
/* debugfs "action" file: write-only command interface ("open"/"close") */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1127
1128static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1129 size_t count, loff_t *f_pos)
1130{
1131 static char bufa[200];
1132 static char *buf = bufa;
1133 int sz = sizeof(bufa);
1134 struct msm_tsif_device *tsif_device = filp->private_data;
1135 int len = 0;
1136 if (tsif_device) {
1137 int i;
1138 len += snprintf(buf + len, sz - len,
1139 "ri %3d | wi %3d | dmwi %3d |",
1140 tsif_device->ri, tsif_device->wi,
1141 tsif_device->dmwi);
1142 for (i = 0; i < 2; i++) {
1143 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1144 if (xfer->busy) {
1145 u32 dst =
1146 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1147 u32 base = tsif_device->data_buffer_dma;
1148 int w = (dst - base) / TSIF_PKT_SIZE;
1149 len += snprintf(buf + len, sz - len,
1150 " [%3d]{%3d}",
1151 w, xfer->wi);
1152 } else {
1153 len += snprintf(buf + len, sz - len,
1154 " ---idle---");
1155 }
1156 }
1157 len += snprintf(buf + len, sz - len, "\n");
1158 } else {
1159 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1160 }
1161 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1162}
1163
/* debugfs "dma" file: read-only DMA state snapshot */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1168
1169static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1170 size_t count, loff_t *f_pos)
1171{
1172 static char bufa[300];
1173 static char *buf = bufa;
1174 int sz = sizeof(bufa);
1175 struct msm_tsif_device *tsif_device = filp->private_data;
1176 int len = 0;
1177 if (tsif_device) {
1178 struct msm_tsif_platform_data *pdata =
1179 tsif_device->pdev->dev.platform_data;
1180 int i;
1181 for (i = 0; i < pdata->num_gpios; i++) {
1182 if (pdata->gpios[i].gpio_cfg) {
1183 int x = !!gpio_get_value(GPIO_PIN(
1184 pdata->gpios[i].gpio_cfg));
1185 len += snprintf(buf + len, sz - len,
1186 "%15s: %d\n",
1187 pdata->gpios[i].label, x);
1188 }
1189 }
1190 } else {
1191 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1192 }
1193 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1194}
1195
/* debugfs "gpios" file: read-only dump of TSIF GPIO levels */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1200
1201
1202static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
1203{
1204 tsif_device->dent_tsif = debugfs_create_dir(
1205 dev_name(&tsif_device->pdev->dev), NULL);
1206 if (tsif_device->dent_tsif) {
1207 int i;
1208 void __iomem *base = tsif_device->base;
1209 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
1210 tsif_device->debugfs_tsif_regs[i] =
1211 debugfs_create_iomem_x32(
1212 debugfs_tsif_regs[i].name,
1213 debugfs_tsif_regs[i].mode,
1214 tsif_device->dent_tsif,
1215 base + debugfs_tsif_regs[i].offset);
1216 }
1217 tsif_device->debugfs_gpio = debugfs_create_file("gpios",
1218 S_IRUGO,
1219 tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
1220 tsif_device->debugfs_action = debugfs_create_file("action",
1221 S_IWUSR,
1222 tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
1223 tsif_device->debugfs_dma = debugfs_create_file("dma",
1224 S_IRUGO,
1225 tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
1226 tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
1227 S_IRUGO,
1228 tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
1229 }
1230}
1231
1232static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1233{
1234 if (tsif_device->dent_tsif) {
1235 int i;
1236 debugfs_remove_recursive(tsif_device->dent_tsif);
1237 tsif_device->dent_tsif = NULL;
1238 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1239 tsif_device->debugfs_tsif_regs[i] = NULL;
1240 tsif_device->debugfs_gpio = NULL;
1241 tsif_device->debugfs_action = NULL;
1242 tsif_device->debugfs_dma = NULL;
1243 tsif_device->debugfs_databuf = NULL;
1244 }
1245}
1246/* ===debugfs end=== */
1247
1248/* ===module begin=== */
/* all successfully probed TSIF instances; list_add() in probe prepends */
static LIST_HEAD(tsif_devices);
1250
1251static struct msm_tsif_device *tsif_find_by_id(int id)
1252{
1253 struct msm_tsif_device *tsif_device;
1254 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1255 if (tsif_device->pdev->id == id)
1256 return tsif_device;
1257 }
1258 return NULL;
1259}
1260
1261static int __devinit msm_tsif_probe(struct platform_device *pdev)
1262{
1263 int rc = -ENODEV;
1264 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1265 struct msm_tsif_device *tsif_device;
1266 struct resource *res;
1267 /* check device validity */
1268 /* must have platform data */
1269 if (!plat) {
1270 dev_err(&pdev->dev, "Platform data not available\n");
1271 rc = -EINVAL;
1272 goto out;
1273 }
Joel Nider5578bdb2011-08-12 09:37:11 +03001274
1275 if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001276 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1277 rc = -EINVAL;
1278 goto out;
1279 }
1280 /* OK, we will use this device */
1281 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1282 if (!tsif_device) {
1283 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1284 rc = -ENOMEM;
1285 goto out;
1286 }
1287 /* cross links */
1288 tsif_device->pdev = pdev;
1289 platform_set_drvdata(pdev, tsif_device);
1290 tsif_device->mode = 1;
1291 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1292 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1293 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1294 (unsigned long)tsif_device);
1295 if (tsif_get_clocks(tsif_device))
1296 goto err_clocks;
1297/* map I/O memory */
1298 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1299 if (!tsif_device->memres) {
1300 dev_err(&pdev->dev, "Missing MEM resource\n");
1301 rc = -ENXIO;
1302 goto err_rgn;
1303 }
1304 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1305 if (!res) {
1306 dev_err(&pdev->dev, "Missing DMA resource\n");
1307 rc = -ENXIO;
1308 goto err_rgn;
1309 }
1310 tsif_device->dma = res->start;
1311 tsif_device->crci = res->end;
1312 tsif_device->base = ioremap(tsif_device->memres->start,
1313 resource_size(tsif_device->memres));
1314 if (!tsif_device->base) {
1315 dev_err(&pdev->dev, "ioremap failed\n");
1316 goto err_ioremap;
1317 }
1318 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1319 tsif_device->memres->start, tsif_device->base);
1320 rc = tsif_start_gpios(tsif_device);
1321 if (rc)
1322 goto err_gpio;
1323
1324 pm_runtime_set_active(&pdev->dev);
1325 pm_runtime_enable(&pdev->dev);
1326
1327 tsif_debugfs_init(tsif_device);
1328 rc = platform_get_irq(pdev, 0);
1329 if (rc > 0) {
1330 tsif_device->irq = rc;
1331 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1332 dev_name(&pdev->dev), tsif_device);
1333 disable_irq(tsif_device->irq);
1334 }
1335 if (rc) {
1336 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1337 tsif_device->irq, rc);
1338 goto err_irq;
1339 }
1340 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1341 if (rc) {
1342 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1343 goto err_attrs;
1344 }
1345 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1346 dev_name(&pdev->dev));
1347 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1348 tsif_device->irq, tsif_device->memres->start,
1349 tsif_device->dma, tsif_device->crci);
1350 list_add(&tsif_device->devlist, &tsif_devices);
1351 return 0;
1352/* error path */
1353 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1354err_attrs:
1355 free_irq(tsif_device->irq, tsif_device);
1356err_irq:
1357 tsif_debugfs_exit(tsif_device);
1358 tsif_stop_gpios(tsif_device);
1359err_gpio:
1360 iounmap(tsif_device->base);
1361err_ioremap:
1362err_rgn:
1363 tsif_put_clocks(tsif_device);
1364err_clocks:
1365 kfree(tsif_device);
1366out:
1367 return rc;
1368}
1369
/*
 * msm_tsif_remove() - unbind the driver from a TSIF platform device.
 *
 * Releases everything acquired in msm_tsif_probe(): drops the instance
 * from the global list, removes sysfs/debugfs entries, frees the IRQ,
 * shuts down DMA and GPIOs, unmaps registers, releases clocks and
 * disables runtime PM.  Always returns 0.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	/* DMA is torn down before GPIOs/clocks; see note in action_close() */
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1389
/* Runtime-PM suspend hook: nothing to do yet, just trace the call. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1395
/* Runtime-PM resume hook: nothing to do yet, just trace the call. */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1401
/* Runtime-PM callbacks wired into the platform driver below. */
static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1406
1407
1408static struct platform_driver msm_tsif_driver = {
1409 .probe = msm_tsif_probe,
1410 .remove = __exit_p(msm_tsif_remove),
1411 .driver = {
1412 .name = "msm_tsif",
1413 .pm = &tsif_dev_pm_ops,
1414 },
1415};
1416
1417static int __init mod_init(void)
1418{
1419 int rc = platform_driver_register(&msm_tsif_driver);
1420 if (rc)
1421 pr_err("TSIF: platform_driver_register failed: %d\n", rc);
1422 return rc;
1423}
1424
/* module exit point: unregister the platform driver */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
1429/* ===module end=== */
1430
1431/* public API */
1432
Joel Nider5578bdb2011-08-12 09:37:11 +03001433int tsif_get_active(void)
1434{
1435 struct msm_tsif_device *tsif_device;
1436 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1437 return tsif_device->pdev->id;
1438 }
1439 return -ENODEV;
1440}
1441EXPORT_SYMBOL(tsif_get_active);
1442
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001443void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
1444{
1445 struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
Joel Nider5578bdb2011-08-12 09:37:11 +03001446 if (!tsif_device)
1447 return ERR_PTR(-ENODEV);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001448 if (tsif_device->client_notify || tsif_device->client_data)
1449 return ERR_PTR(-EBUSY);
1450 tsif_device->client_notify = notify;
1451 tsif_device->client_data = data;
1452 /* prevent from unloading */
1453 get_device(&tsif_device->pdev->dev);
1454 return tsif_device;
1455}
1456EXPORT_SYMBOL(tsif_attach);
1457
1458void tsif_detach(void *cookie)
1459{
1460 struct msm_tsif_device *tsif_device = cookie;
1461 tsif_device->client_notify = NULL;
1462 tsif_device->client_data = NULL;
1463 put_device(&tsif_device->pdev->dev);
1464}
1465EXPORT_SYMBOL(tsif_detach);
1466
1467void tsif_get_info(void *cookie, void **pdata, int *psize)
1468{
1469 struct msm_tsif_device *tsif_device = cookie;
1470 if (pdata)
1471 *pdata = tsif_device->data_buffer;
1472 if (psize)
1473 *psize = TSIF_PKTS_IN_BUF;
1474}
1475EXPORT_SYMBOL(tsif_get_info);
1476
1477int tsif_set_mode(void *cookie, int mode)
1478{
1479 struct msm_tsif_device *tsif_device = cookie;
1480 if (tsif_device->state != tsif_state_stopped) {
1481 dev_err(&tsif_device->pdev->dev,
1482 "Can't change mode while device is active\n");
1483 return -EBUSY;
1484 }
1485 switch (mode) {
1486 case 1:
1487 case 2:
1488 case 3:
1489 tsif_device->mode = mode;
1490 break;
1491 default:
1492 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1493 return -EINVAL;
1494 }
1495 return 0;
1496}
1497EXPORT_SYMBOL(tsif_set_mode);
1498
1499int tsif_set_time_limit(void *cookie, u32 value)
1500{
1501 struct msm_tsif_device *tsif_device = cookie;
1502 if (tsif_device->state != tsif_state_stopped) {
1503 dev_err(&tsif_device->pdev->dev,
1504 "Can't change time limit while device is active\n");
1505 return -EBUSY;
1506 }
1507 if (value != (value & 0xFFFFFF)) {
1508 dev_err(&tsif_device->pdev->dev,
1509 "Invalid time limit (should be 24 bit): %#x\n", value);
1510 return -EINVAL;
1511 }
1512 tsif_device->time_limit = value;
1513 return 0;
1514}
1515EXPORT_SYMBOL(tsif_set_time_limit);
1516
1517int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1518{
1519 struct msm_tsif_device *tsif_device = cookie;
1520 if (tsif_device->data_buffer) {
1521 dev_err(&tsif_device->pdev->dev,
1522 "Data buffer already allocated: %p\n",
1523 tsif_device->data_buffer);
1524 return -EBUSY;
1525 }
1526 /* check for crazy user */
1527 if (pkts_in_chunk * chunks_in_buf > 10240) {
1528 dev_err(&tsif_device->pdev->dev,
1529 "Buffer requested is too large: %d * %d\n",
1530 pkts_in_chunk,
1531 chunks_in_buf);
1532 return -EINVAL;
1533 }
1534 /* parameters are OK, execute */
1535 tsif_device->pkts_per_chunk = pkts_in_chunk;
1536 tsif_device->chunks_per_buf = chunks_in_buf;
1537 return 0;
1538}
1539EXPORT_SYMBOL(tsif_set_buf_config);
1540
1541void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
1542{
1543 struct msm_tsif_device *tsif_device = cookie;
1544 if (ri)
1545 *ri = tsif_device->ri;
1546 if (wi)
1547 *wi = tsif_device->wi;
1548 if (state)
1549 *state = tsif_device->state;
1550}
1551EXPORT_SYMBOL(tsif_get_state);
1552
/*
 * tsif_start() - start streaming on the instance identified by @cookie
 * (the handle returned by tsif_attach()).  Returns action_open()'s rc.
 */
int tsif_start(void *cookie)
{
	return action_open((struct msm_tsif_device *)cookie);
}
EXPORT_SYMBOL(tsif_start);
1559
/* tsif_stop() - counterpart of tsif_start(); stops streaming on @cookie. */
void tsif_stop(void *cookie)
{
	action_close((struct msm_tsif_device *)cookie);
}
EXPORT_SYMBOL(tsif_stop);
1566
1567void tsif_reclaim_packets(void *cookie, int read_index)
1568{
1569 struct msm_tsif_device *tsif_device = cookie;
1570 tsif_device->ri = read_index;
1571}
1572EXPORT_SYMBOL(tsif_reclaim_packets);
1573
/* module entry/exit registration and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1580