/*
2 * TSIF Driver
3 *
4 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
34
35#include <mach/gpio.h>
36#include <mach/dma.h>
37#include <mach/msm_tsif.h>
38
39/*
40 * TSIF register offsets
41 */
42#define TSIF_STS_CTL_OFF (0x0)
43#define TSIF_TIME_LIMIT_OFF (0x4)
44#define TSIF_CLK_REF_OFF (0x8)
45#define TSIF_LPBK_FLAGS_OFF (0xc)
46#define TSIF_LPBK_DATA_OFF (0x10)
47#define TSIF_TEST_CTL_OFF (0x14)
48#define TSIF_TEST_MODE_OFF (0x18)
49#define TSIF_TEST_RESET_OFF (0x1c)
50#define TSIF_TEST_EXPORT_OFF (0x20)
51#define TSIF_TEST_CURRENT_OFF (0x24)
52
53#define TSIF_DATA_PORT_OFF (0x100)
54
55/* bits for TSIF_STS_CTL register */
56#define TSIF_STS_CTL_EN_IRQ (1 << 28)
57#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
58#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
59#define TSIF_STS_CTL_OVERFLOW (1 << 25)
60#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
61#define TSIF_STS_CTL_TIMEOUT (1 << 23)
62#define TSIF_STS_CTL_INV_SYNC (1 << 21)
63#define TSIF_STS_CTL_INV_NULL (1 << 20)
64#define TSIF_STS_CTL_INV_ERROR (1 << 19)
65#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
66#define TSIF_STS_CTL_INV_DATA (1 << 17)
67#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
68#define TSIF_STS_CTL_SPARE (1 << 15)
69#define TSIF_STS_CTL_EN_NULL (1 << 11)
70#define TSIF_STS_CTL_EN_ERROR (1 << 10)
71#define TSIF_STS_CTL_LAST_BIT (1 << 9)
72#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
73#define TSIF_STS_CTL_EN_TCR (1 << 7)
74#define TSIF_STS_CTL_TEST_MODE (3 << 5)
75#define TSIF_STS_CTL_EN_DM (1 << 4)
76#define TSIF_STS_CTL_STOP (1 << 3)
77#define TSIF_STS_CTL_START (1 << 0)
78
79/*
80 * Data buffering parameters
81 *
82 * Data stored in cyclic buffer;
83 *
84 * Data organized in chunks of packets.
85 * One chunk processed at a time by the data mover
86 *
87 */
88#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
89#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
90#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
91#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
92#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
93#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
94
95#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
96#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
97#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
98#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
99#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
100
/*
 * Table used to create one debugfs file per TSIF hardware register.
 * Each entry maps a file name and permission mode to the register's
 * offset from the device's iomapped base.
 */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* debugfs file permissions */
	int offset;		/* register offset from tsif_device->base */
} debugfs_tsif_regs[] = {
	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset",   S_IWUSR,           TSIF_TEST_RESET_OFF},
	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
};
119
/* structures for Data Mover */

/*
 * One Data Mover command: a single "box" transfer descriptor plus the
 * physical address of that box, used as the command-pointer list entry.
 * Allocated from coherent DMA memory (see tsif_dma_init()).
 */
struct tsif_dmov_cmd {
	dmov_box box;
	dma_addr_t box_ptr;
};

struct msm_tsif_device;

/*
 * Per-transfer bookkeeping handed to the Data Mover; recovered in the
 * completion callback via container_of() on @hdr.
 */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;	/* DM command header (completion func) */
	struct msm_tsif_device *tsif_device;	/* back-pointer for callback */
	int busy;	/* non-zero while the xfer is queued in the DM */
	int wi; /**< set devices's write index after xfer */
};
134
/*
 * Per-device state for one TSIF instance.
 * Buffer indexing (ri/wi/dmwi) is described in the "TSIF DMA theory of
 * operation" comment above tsif_dma_schedule().
 */
struct msm_tsif_device {
	struct list_head devlist;	/* link in driver's device list */
	struct platform_device *pdev;
	struct resource *memres;	/* register memory resource */
	void __iomem *base;		/* iomapped registers */
	unsigned int irq;
	int mode;			/* 1, 2, or 3 (manual/debugfs) */
	u32 time_limit;			/* value for TSIF_TIME_LIMIT_OFF */
	enum tsif_state state;
	struct wake_lock wake_lock;	/* held while device is open */
	/* clocks; any of these may be NULL if absent from platform data */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;	/* per-device debugfs directory */
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf;
	/* DMA related */
	int dma;			/* DM channel number */
	int crci;			/* DM flow-control (CRCI) id */
	void *data_buffer;		/* cyclic packet buffer (coherent) */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;
	u32 chunks_per_buf;
	int ri;		/* read index: first packet ready for the client */
	int wi;		/* write index: next packet to be written by DM */
	int dmwi; /**< DataMover write index */
	struct tsif_dmov_cmd *dmov_cmd[2];	/* 2 in-flight DM commands */
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* reschedules DMA after IRQ */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* last chunk's first/last packet status (debug) */
	/* client */
	void *client_data;
	void (*client_notify)(void *client_data);
};
184
185/* ===clocks begin=== */
186
187static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
188{
189 if (tsif_device->tsif_clk) {
190 clk_put(tsif_device->tsif_clk);
191 tsif_device->tsif_clk = NULL;
192 }
193 if (tsif_device->tsif_pclk) {
194 clk_put(tsif_device->tsif_pclk);
195 tsif_device->tsif_pclk = NULL;
196 }
197
198 if (tsif_device->tsif_ref_clk) {
199 clk_put(tsif_device->tsif_ref_clk);
200 tsif_device->tsif_ref_clk = NULL;
201 }
202}
203
204static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
205{
206 struct msm_tsif_platform_data *pdata =
207 tsif_device->pdev->dev.platform_data;
208 int rc = 0;
209
210 if (pdata->tsif_clk) {
211 tsif_device->tsif_clk = clk_get(NULL, pdata->tsif_clk);
212 if (IS_ERR(tsif_device->tsif_clk)) {
213 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
214 pdata->tsif_clk);
215 rc = PTR_ERR(tsif_device->tsif_clk);
216 tsif_device->tsif_clk = NULL;
217 goto ret;
218 }
219 }
220 if (pdata->tsif_pclk) {
221 tsif_device->tsif_pclk = clk_get(NULL, pdata->tsif_pclk);
222 if (IS_ERR(tsif_device->tsif_pclk)) {
223 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
224 pdata->tsif_pclk);
225 rc = PTR_ERR(tsif_device->tsif_pclk);
226 tsif_device->tsif_pclk = NULL;
227 goto ret;
228 }
229 }
230 if (pdata->tsif_ref_clk) {
231 tsif_device->tsif_ref_clk = clk_get(NULL, pdata->tsif_ref_clk);
232 if (IS_ERR(tsif_device->tsif_ref_clk)) {
233 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
234 pdata->tsif_ref_clk);
235 rc = PTR_ERR(tsif_device->tsif_ref_clk);
236 tsif_device->tsif_ref_clk = NULL;
237 goto ret;
238 }
239 }
240 return 0;
241ret:
242 tsif_put_clocks(tsif_device);
243 return rc;
244}
245
246static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
247{
248 if (on) {
249 if (tsif_device->tsif_clk)
250 clk_enable(tsif_device->tsif_clk);
251 if (tsif_device->tsif_pclk)
252 clk_enable(tsif_device->tsif_pclk);
253 clk_enable(tsif_device->tsif_ref_clk);
254 } else {
255 if (tsif_device->tsif_clk)
256 clk_disable(tsif_device->tsif_clk);
257 if (tsif_device->tsif_pclk)
258 clk_disable(tsif_device->tsif_pclk);
259 clk_disable(tsif_device->tsif_ref_clk);
260 }
261}
262/* ===clocks end=== */
263/* ===gpio begin=== */
264
265static void tsif_gpios_free(const struct msm_gpio *table, int size)
266{
267 int i;
268 const struct msm_gpio *g;
269 for (i = size-1; i >= 0; i--) {
270 g = table + i;
271 gpio_free(GPIO_PIN(g->gpio_cfg));
272 }
273}
274
275static int tsif_gpios_request(const struct msm_gpio *table, int size)
276{
277 int rc;
278 int i;
279 const struct msm_gpio *g;
280 for (i = 0; i < size; i++) {
281 g = table + i;
282 rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
283 if (rc) {
284 pr_err("gpio_request(%d) <%s> failed: %d\n",
285 GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
286 goto err;
287 }
288 }
289 return 0;
290err:
291 tsif_gpios_free(table, i);
292 return rc;
293}
294
295static int tsif_gpios_disable(const struct msm_gpio *table, int size)
296{
297 int rc = 0;
298 int i;
299 const struct msm_gpio *g;
300 for (i = size-1; i >= 0; i--) {
301 int tmp;
302 g = table + i;
303 tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
304 if (tmp) {
305 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
306 " <%s> failed: %d\n",
307 g->gpio_cfg, g->label ?: "?", rc);
308 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
309 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
310 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
311 GPIO_DRVSTR(g->gpio_cfg));
312 if (!rc)
313 rc = tmp;
314 }
315 }
316
317 return rc;
318}
319
320static int tsif_gpios_enable(const struct msm_gpio *table, int size)
321{
322 int rc;
323 int i;
324 const struct msm_gpio *g;
325 for (i = 0; i < size; i++) {
326 g = table + i;
327 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
328 if (rc) {
329 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
330 " <%s> failed: %d\n",
331 g->gpio_cfg, g->label ?: "?", rc);
332 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
333 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
334 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
335 GPIO_DRVSTR(g->gpio_cfg));
336 goto err;
337 }
338 }
339 return 0;
340err:
341 tsif_gpios_disable(table, i);
342 return rc;
343}
344
/*
 * tsif_gpios_request_enable() - request then enable all GPIOs in @table.
 *
 * If enabling fails, the requested GPIOs are freed so the caller is
 * left with nothing to undo.
 */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	int rc;

	rc = tsif_gpios_request(table, size);
	if (rc)
		return rc;
	rc = tsif_gpios_enable(table, size);
	if (rc)
		tsif_gpios_free(table, size);
	return rc;
}
355
/* tsif_gpios_disable_free() - disable, then release, all GPIOs in @table */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
	tsif_gpios_free(table, size);
}
361
362static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
363{
364 struct msm_tsif_platform_data *pdata =
365 tsif_device->pdev->dev.platform_data;
366 return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
367}
368
369static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
370{
371 struct msm_tsif_platform_data *pdata =
372 tsif_device->pdev->dev.platform_data;
373 tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
374}
375
376/* ===gpio end=== */
377
378static int tsif_start_hw(struct msm_tsif_device *tsif_device)
379{
380 u32 ctl = TSIF_STS_CTL_EN_IRQ |
381 TSIF_STS_CTL_EN_TIME_LIM |
382 TSIF_STS_CTL_EN_TCR |
383 TSIF_STS_CTL_EN_DM;
384 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
385 switch (tsif_device->mode) {
386 case 1: /* mode 1 */
387 ctl |= (0 << 5);
388 break;
389 case 2: /* mode 2 */
390 ctl |= (1 << 5);
391 break;
392 case 3: /* manual - control from debugfs */
393 return 0;
394 break;
395 default:
396 return -EINVAL;
397 }
398 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
399 iowrite32(tsif_device->time_limit,
400 tsif_device->base + TSIF_TIME_LIMIT_OFF);
401 wmb();
402 iowrite32(ctl | TSIF_STS_CTL_START,
403 tsif_device->base + TSIF_STS_CTL_OFF);
404 wmb();
405 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
406 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
407}
408
409static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
410{
411 iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
412 wmb();
413}
414
415/* ===DMA begin=== */
416/**
417 * TSIF DMA theory of operation
418 *
419 * Circular memory buffer \a tsif_mem_buffer allocated;
420 * 4 pointers points to and moved forward on:
421 * - \a ri index of first ready to read packet.
422 * Updated by client's call to tsif_reclaim_packets()
423 * - \a wi points to the next packet to be written by DM.
 *      Data below is valid and will not be overridden by DMA.
425 * Moved on DM callback
426 * - \a dmwi points to the next packet not scheduled yet for DM
427 * moved when packet scheduled for DM
428 *
429 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
430 * at time immediately after scheduling.
431 *
432 * Initially, 2 packets get scheduled for the DM.
433 *
434 * Upon packet receive, DM writes packet to the pre-programmed
435 * location and invoke its callback.
436 *
 * DM callback sets the wi pointer to \a xfer->wi;
438 * then it schedules next packet for DM and moves \a dmwi pointer.
439 *
440 * Buffer overflow handling
441 *
442 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
443 * DMA re-scheduled to the same index.
444 * Callback check and not move \a wi to become equal to \a ri
445 *
446 * On \a read request, data between \a ri and \a wi pointers may be read;
447 * \ri pointer moved accordingly.
448 *
449 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
450 * \a wi is between [\a ri, \a dmwi]
451 *
452 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
453 *
454 * Number of scheduled packets for DM: (dmwi-wi)
455 */
456
/**
 * tsif_dma_schedule - schedule DMA transfers
 *
 * @tsif_device: device
 *
 * Queues a chunk-sized box transfer on every idle xfer slot (there are
 * two).  Executed from process context on init, or from tasklet when
 * re-scheduling upon DMA completion.
 * This prevent concurrent execution from several CPU's
 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		/* destination: packet slot @dmwi in the cyclic buffer */
		dmwi0 = tsif_device->dmwi;
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* wi the device will adopt once this xfer completes */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
509
/**
 * tsif_dmov_complete_func - DataMover completion callback
 *
 * @cmd: original DM command
 * @result: DM result
 * @err: optional error buffer
 *
 * On success, advances the device write index and accounts statistics;
 * on error/flush, invalidates the chunk's packet status words and may
 * stop the hardware or transition to the stopped state.  Finally it
 * notifies the client and (while running) reschedules DMA via tasklet.
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;
	/* byte offset of this chunk within the cyclic data buffer */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * Clocks _may_ be stopped right from IRQ
				 * context. This is far from optimal w.r.t
				 * latency.
				 *
				 * But, this branch taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happens for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 */
				tsif_clock(tsif_device, 0);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			/* stopped only when both xfers have drained */
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
649
650/**
651 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
652 *
653 * @data: tsif_device
654 *
655 * Reschedule DMA requests
656 *
657 * Executed in tasklet
658 */
659static void tsif_dma_refill(unsigned long data)
660{
661 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
662 if (tsif_device->state == tsif_state_running)
663 tsif_dma_schedule(tsif_device);
664}
665
/**
 * tsif_dma_flush - flush DMA channel
 *
 * @tsif_device:
 *
 * busy wait till DMA flushed; the xfer busy flags are cleared by the
 * DM completion callback (tsif_dmov_complete_func) as each command is
 * flushed.  Must be called from process context (msleep).
 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		/* flushing state stops the completion path from rescheduling */
		tsif_device->state = tsif_state_flushing;
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			msm_dmov_flush(tsif_device->dma);
			msleep(10);
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
687
688static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
689{
690 int i;
691 tsif_device->state = tsif_state_flushing;
692 tasklet_kill(&tsif_device->dma_refill);
693 tsif_dma_flush(tsif_device);
694 for (i = 0; i < 2; i++) {
695 if (tsif_device->dmov_cmd[i]) {
696 dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
697 tsif_device->dmov_cmd[i],
698 tsif_device->dmov_cmd_dma[i]);
699 tsif_device->dmov_cmd[i] = NULL;
700 }
701 }
702 if (tsif_device->data_buffer) {
703 tsif_device->blob_wrapper_databuf.data = NULL;
704 tsif_device->blob_wrapper_databuf.size = 0;
705 dma_free_coherent(NULL, TSIF_BUF_SIZE,
706 tsif_device->data_buffer,
707 tsif_device->data_buffer_dma);
708 tsif_device->data_buffer = NULL;
709 }
710}
711
/*
 * tsif_dma_init() - allocate the cyclic data buffer and the two DM
 * command descriptors, and program the box transfers.
 *
 * Each box moves TSIF_PKTS_IN_CHUNK rows of TSIF_PKT_SIZE bytes from
 * the fixed TSIF data port (flow-controlled via CRCI) into consecutive
 * packet slots of the data buffer; the destination address is patched
 * per-schedule in tsif_dma_schedule().
 *
 * Return: 0 on success, -ENOMEM if any allocation fails (everything
 * allocated so far is released via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	/* buffer starts empty */
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* src does not advance (FIFO port); dst steps one packet */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		/* command-pointer list entry pointing at the box */
		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
					    offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	/* start from a clean channel */
	msm_dmov_flush(tsif_device->dma);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
766
767/* ===DMA end=== */
768
769/* ===IRQ begin=== */
770
771static irqreturn_t tsif_irq(int irq, void *dev_id)
772{
773 struct msm_tsif_device *tsif_device = dev_id;
774 u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
775 if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
776 TSIF_STS_CTL_OVERFLOW |
777 TSIF_STS_CTL_LOST_SYNC |
778 TSIF_STS_CTL_TIMEOUT))) {
779 dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
780 return IRQ_NONE;
781 }
782 if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
783 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
784 tsif_device->stat_rx++;
785 }
786 if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
787 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
788 tsif_device->stat_overflow++;
789 }
790 if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
791 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
792 tsif_device->stat_lost_sync++;
793 }
794 if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
795 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
796 tsif_device->stat_timeout++;
797 }
798 iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
799 wmb();
800 return IRQ_HANDLED;
801}
802
803/* ===IRQ end=== */
804
805/* ===Device attributes begin=== */
806
807static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
808 char *buf)
809{
810 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
811 char *state_string;
812 switch (tsif_device->state) {
813 case tsif_state_stopped:
814 state_string = "stopped";
815 break;
816 case tsif_state_running:
817 state_string = "running";
818 break;
819 case tsif_state_flushing:
820 state_string = "flushing";
821 break;
822 default:
823 state_string = "???";
824 }
825 return snprintf(buf, PAGE_SIZE,
826 "Device %s\n"
827 "Mode = %d\n"
828 "Time limit = %d\n"
829 "State %s\n"
830 "Client = %p\n"
831 "Pkt/Buf = %d\n"
832 "Pkt/chunk = %d\n"
833 "--statistics--\n"
834 "Rx chunks = %d\n"
835 "Overflow = %d\n"
836 "Lost sync = %d\n"
837 "Timeout = %d\n"
838 "DMA error = %d\n"
839 "Soft drop = %d\n"
840 "IFI = %d\n"
841 "(0x%08x - 0x%08x) / %d\n"
842 "--debug--\n"
843 "GLBL_CLK_ENA = 0x%08x\n"
844 "ROW_RESET = 0x%08x\n"
845 "CLK_HALT_STATEB = 0x%08x\n"
846 "TV_NS_REG = 0x%08x\n"
847 "TSIF_NS_REG = 0x%08x\n",
848 dev_name(dev),
849 tsif_device->mode,
850 tsif_device->time_limit,
851 state_string,
852 tsif_device->client_data,
853 TSIF_PKTS_IN_BUF,
854 TSIF_PKTS_IN_CHUNK,
855 tsif_device->stat_rx,
856 tsif_device->stat_overflow,
857 tsif_device->stat_lost_sync,
858 tsif_device->stat_timeout,
859 tsif_device->stat_dmov_err,
860 tsif_device->stat_soft_drop,
861 tsif_device->stat_ifi,
862 tsif_device->stat1,
863 tsif_device->stat0,
864 TSIF_PKTS_IN_CHUNK - 1,
865 ioread32(GLBL_CLK_ENA),
866 ioread32(ROW_RESET),
867 ioread32(CLK_HALT_STATEB),
868 ioread32(TV_NS_REG),
869 ioread32(TSIF_NS_REG)
870 );
871}
872/**
873 * set_stats - reset statistics on write
874 *
875 * @dev:
876 * @attr:
877 * @buf:
878 * @count:
879 */
880static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
881 const char *buf, size_t count)
882{
883 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
884 tsif_device->stat_rx = 0;
885 tsif_device->stat_overflow = 0;
886 tsif_device->stat_lost_sync = 0;
887 tsif_device->stat_timeout = 0;
888 tsif_device->stat_dmov_err = 0;
889 tsif_device->stat_soft_drop = 0;
890 tsif_device->stat_ifi = 0;
891 return count;
892}
893static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
894
895static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
896 char *buf)
897{
898 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
899 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
900}
901
902static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
903 const char *buf, size_t count)
904{
905 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
906 int value;
907 int rc;
908 if (1 != sscanf(buf, "%d", &value)) {
909 dev_err(&tsif_device->pdev->dev,
910 "Failed to parse integer: <%s>\n", buf);
911 return -EINVAL;
912 }
913 rc = tsif_set_mode(tsif_device, value);
914 if (!rc)
915 rc = count;
916 return rc;
917}
918static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
919
920static ssize_t show_time_limit(struct device *dev,
921 struct device_attribute *attr,
922 char *buf)
923{
924 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
925 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
926}
927
928static ssize_t set_time_limit(struct device *dev,
929 struct device_attribute *attr,
930 const char *buf, size_t count)
931{
932 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
933 int value;
934 int rc;
935 if (1 != sscanf(buf, "%d", &value)) {
936 dev_err(&tsif_device->pdev->dev,
937 "Failed to parse integer: <%s>\n", buf);
938 return -EINVAL;
939 }
940 rc = tsif_set_time_limit(tsif_device, value);
941 if (!rc)
942 rc = count;
943 return rc;
944}
945static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
946 show_time_limit, set_time_limit);
947
948static ssize_t show_buf_config(struct device *dev,
949 struct device_attribute *attr,
950 char *buf)
951{
952 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
953 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
954 tsif_device->pkts_per_chunk,
955 tsif_device->chunks_per_buf);
956}
957
958static ssize_t set_buf_config(struct device *dev,
959 struct device_attribute *attr,
960 const char *buf, size_t count)
961{
962 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
963 u32 p, c;
964 int rc;
965 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
966 dev_err(&tsif_device->pdev->dev,
967 "Failed to parse integer: <%s>\n", buf);
968 return -EINVAL;
969 }
970 rc = tsif_set_buf_config(tsif_device, p, c);
971 if (!rc)
972 rc = count;
973 return rc;
974}
975static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
976 show_buf_config, set_buf_config);
977
978static struct attribute *dev_attrs[] = {
979 &dev_attr_stats.attr,
980 &dev_attr_mode.attr,
981 &dev_attr_time_limit.attr,
982 &dev_attr_buf_config.attr,
983 NULL,
984};
985static struct attribute_group dev_attr_grp = {
986 .attrs = dev_attrs,
987};
988/* ===Device attributes end=== */
989
990/* ===debugfs begin=== */
991
992static int debugfs_iomem_x32_set(void *data, u64 val)
993{
994 iowrite32(val, data);
995 wmb();
996 return 0;
997}
998
/* debugfs getter: read the iomapped register at @data into *@val */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}
1004
1005DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1006 debugfs_iomem_x32_set, "0x%08llx\n");
1007
1008struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
1009 struct dentry *parent, u32 *value)
1010{
1011 return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
1012}
1013
1014static int action_open(struct msm_tsif_device *tsif_device)
1015{
1016 int rc = -EINVAL;
1017 int result;
1018
1019 struct msm_tsif_platform_data *pdata =
1020 tsif_device->pdev->dev.platform_data;
1021 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
1022 if (tsif_device->state != tsif_state_stopped)
1023 return -EAGAIN;
1024 rc = tsif_dma_init(tsif_device);
1025 if (rc) {
1026 dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
1027 return rc;
1028 }
1029 tsif_device->state = tsif_state_running;
1030 /*
1031 * DMA should be scheduled prior to TSIF hardware initialization,
1032 * otherwise "bus error" will be reported by Data Mover
1033 */
1034 enable_irq(tsif_device->irq);
1035 tsif_clock(tsif_device, 1);
1036 tsif_dma_schedule(tsif_device);
1037 /*
1038 * init the device if required
1039 */
1040 if (pdata->init)
1041 pdata->init(pdata);
1042 rc = tsif_start_hw(tsif_device);
1043 if (rc) {
1044 dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
1045 tsif_dma_exit(tsif_device);
1046 tsif_clock(tsif_device, 0);
1047 return rc;
1048 }
1049
1050 result = pm_runtime_get(&tsif_device->pdev->dev);
1051 if (result < 0) {
1052 dev_err(&tsif_device->pdev->dev,
1053 "Runtime PM: Unable to wake up the device, rc = %d\n",
1054 result);
1055 return result;
1056 }
1057
1058 wake_lock(&tsif_device->wake_lock);
1059 return rc;
1060}
1061
1062static int action_close(struct msm_tsif_device *tsif_device)
1063{
1064 dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
1065 (int)tsif_device->state);
1066 /*
1067 * DMA should be flushed/stopped prior to TSIF hardware stop,
1068 * otherwise "bus error" will be reported by Data Mover
1069 */
1070 tsif_stop_hw(tsif_device);
1071 tsif_dma_exit(tsif_device);
1072 tsif_clock(tsif_device, 0);
1073 disable_irq(tsif_device->irq);
1074
1075 pm_runtime_put(&tsif_device->pdev->dev);
1076 wake_unlock(&tsif_device->wake_lock);
1077 return 0;
1078}
1079
1080
/*
 * Command table for the debugfs "action" file: a command string written by
 * the user is mapped to the matching start/stop handler.
 */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1088
1089static ssize_t tsif_debugfs_action_write(struct file *filp,
1090 const char __user *userbuf,
1091 size_t count, loff_t *f_pos)
1092{
1093 int i;
1094 struct msm_tsif_device *tsif_device = filp->private_data;
1095 char s[40];
1096 int len = min(sizeof(s) - 1, count);
1097 if (copy_from_user(s, userbuf, len))
1098 return -EFAULT;
1099 s[len] = '\0';
1100 dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
1101 for (i = 0; i < ARRAY_SIZE(actions); i++) {
1102 if (!strncmp(s, actions[i].name,
1103 min(count, strlen(actions[i].name)))) {
1104 int rc = actions[i].func(tsif_device);
1105 if (!rc)
1106 rc = count;
1107 return rc;
1108 }
1109 }
1110 return -EINVAL;
1111}
1112
1113static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
1114{
1115 filp->private_data = inode->i_private;
1116 return 0;
1117}
1118
/* debugfs "action" file: write-only control interface ("open"/"close") */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1123
1124static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1125 size_t count, loff_t *f_pos)
1126{
1127 static char bufa[200];
1128 static char *buf = bufa;
1129 int sz = sizeof(bufa);
1130 struct msm_tsif_device *tsif_device = filp->private_data;
1131 int len = 0;
1132 if (tsif_device) {
1133 int i;
1134 len += snprintf(buf + len, sz - len,
1135 "ri %3d | wi %3d | dmwi %3d |",
1136 tsif_device->ri, tsif_device->wi,
1137 tsif_device->dmwi);
1138 for (i = 0; i < 2; i++) {
1139 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1140 if (xfer->busy) {
1141 u32 dst =
1142 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1143 u32 base = tsif_device->data_buffer_dma;
1144 int w = (dst - base) / TSIF_PKT_SIZE;
1145 len += snprintf(buf + len, sz - len,
1146 " [%3d]{%3d}",
1147 w, xfer->wi);
1148 } else {
1149 len += snprintf(buf + len, sz - len,
1150 " ---idle---");
1151 }
1152 }
1153 len += snprintf(buf + len, sz - len, "\n");
1154 } else {
1155 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1156 }
1157 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1158}
1159
/* debugfs "dma" file: read-only snapshot of DMA ring state */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1164
1165static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1166 size_t count, loff_t *f_pos)
1167{
1168 static char bufa[300];
1169 static char *buf = bufa;
1170 int sz = sizeof(bufa);
1171 struct msm_tsif_device *tsif_device = filp->private_data;
1172 int len = 0;
1173 if (tsif_device) {
1174 struct msm_tsif_platform_data *pdata =
1175 tsif_device->pdev->dev.platform_data;
1176 int i;
1177 for (i = 0; i < pdata->num_gpios; i++) {
1178 if (pdata->gpios[i].gpio_cfg) {
1179 int x = !!gpio_get_value(GPIO_PIN(
1180 pdata->gpios[i].gpio_cfg));
1181 len += snprintf(buf + len, sz - len,
1182 "%15s: %d\n",
1183 pdata->gpios[i].label, x);
1184 }
1185 }
1186 } else {
1187 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1188 }
1189 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1190}
1191
/* debugfs "gpios" file: read-only dump of TSIF GPIO levels */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1196
1197
/*
 * Populate debugfs for one TSIF device: a per-device directory containing
 * one x32 file per HW register (from debugfs_tsif_regs[], defined above
 * this view), plus "gpios", "action", "dma" and a blob view of the data
 * buffer. Creation failures are silently tolerated — debugfs is optional
 * and the returned dentries are only stored for bookkeeping.
 */
static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
{
	tsif_device->dent_tsif = debugfs_create_dir(
		dev_name(&tsif_device->pdev->dev), NULL);
	if (tsif_device->dent_tsif) {
		int i;
		void __iomem *base = tsif_device->base;
		/* one hex register file per entry in the register table */
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
			tsif_device->debugfs_tsif_regs[i] =
				debugfs_create_iomem_x32(
					debugfs_tsif_regs[i].name,
					debugfs_tsif_regs[i].mode,
					tsif_device->dent_tsif,
					base + debugfs_tsif_regs[i].offset);
		}
		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
		tsif_device->debugfs_action = debugfs_create_file("action",
			S_IWUSR,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
		tsif_device->debugfs_dma = debugfs_create_file("dma",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
			S_IRUGO,
			tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
	}
}
1227
1228static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1229{
1230 if (tsif_device->dent_tsif) {
1231 int i;
1232 debugfs_remove_recursive(tsif_device->dent_tsif);
1233 tsif_device->dent_tsif = NULL;
1234 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1235 tsif_device->debugfs_tsif_regs[i] = NULL;
1236 tsif_device->debugfs_gpio = NULL;
1237 tsif_device->debugfs_action = NULL;
1238 tsif_device->debugfs_dma = NULL;
1239 tsif_device->debugfs_databuf = NULL;
1240 }
1241}
1242/* ===debugfs end=== */
1243
1244/* ===module begin=== */
/* global registry of probed TSIF devices, looked up by pdev->id */
static LIST_HEAD(tsif_devices);
1246
1247static struct msm_tsif_device *tsif_find_by_id(int id)
1248{
1249 struct msm_tsif_device *tsif_device;
1250 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1251 if (tsif_device->pdev->id == id)
1252 return tsif_device;
1253 }
1254 return NULL;
1255}
1256
1257static int __devinit msm_tsif_probe(struct platform_device *pdev)
1258{
1259 int rc = -ENODEV;
1260 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1261 struct msm_tsif_device *tsif_device;
1262 struct resource *res;
1263 /* check device validity */
1264 /* must have platform data */
1265 if (!plat) {
1266 dev_err(&pdev->dev, "Platform data not available\n");
1267 rc = -EINVAL;
1268 goto out;
1269 }
1270/*TODO macro for max. id*/
1271 if ((pdev->id < 0) || (pdev->id > 0)) {
1272 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1273 rc = -EINVAL;
1274 goto out;
1275 }
1276 /* OK, we will use this device */
1277 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1278 if (!tsif_device) {
1279 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1280 rc = -ENOMEM;
1281 goto out;
1282 }
1283 /* cross links */
1284 tsif_device->pdev = pdev;
1285 platform_set_drvdata(pdev, tsif_device);
1286 tsif_device->mode = 1;
1287 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1288 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1289 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1290 (unsigned long)tsif_device);
1291 if (tsif_get_clocks(tsif_device))
1292 goto err_clocks;
1293/* map I/O memory */
1294 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1295 if (!tsif_device->memres) {
1296 dev_err(&pdev->dev, "Missing MEM resource\n");
1297 rc = -ENXIO;
1298 goto err_rgn;
1299 }
1300 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1301 if (!res) {
1302 dev_err(&pdev->dev, "Missing DMA resource\n");
1303 rc = -ENXIO;
1304 goto err_rgn;
1305 }
1306 tsif_device->dma = res->start;
1307 tsif_device->crci = res->end;
1308 tsif_device->base = ioremap(tsif_device->memres->start,
1309 resource_size(tsif_device->memres));
1310 if (!tsif_device->base) {
1311 dev_err(&pdev->dev, "ioremap failed\n");
1312 goto err_ioremap;
1313 }
1314 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1315 tsif_device->memres->start, tsif_device->base);
1316 rc = tsif_start_gpios(tsif_device);
1317 if (rc)
1318 goto err_gpio;
1319
1320 pm_runtime_set_active(&pdev->dev);
1321 pm_runtime_enable(&pdev->dev);
1322
1323 tsif_debugfs_init(tsif_device);
1324 rc = platform_get_irq(pdev, 0);
1325 if (rc > 0) {
1326 tsif_device->irq = rc;
1327 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1328 dev_name(&pdev->dev), tsif_device);
1329 disable_irq(tsif_device->irq);
1330 }
1331 if (rc) {
1332 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1333 tsif_device->irq, rc);
1334 goto err_irq;
1335 }
1336 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1337 if (rc) {
1338 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1339 goto err_attrs;
1340 }
1341 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1342 dev_name(&pdev->dev));
1343 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1344 tsif_device->irq, tsif_device->memres->start,
1345 tsif_device->dma, tsif_device->crci);
1346 list_add(&tsif_device->devlist, &tsif_devices);
1347 return 0;
1348/* error path */
1349 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1350err_attrs:
1351 free_irq(tsif_device->irq, tsif_device);
1352err_irq:
1353 tsif_debugfs_exit(tsif_device);
1354 tsif_stop_gpios(tsif_device);
1355err_gpio:
1356 iounmap(tsif_device->base);
1357err_ioremap:
1358err_rgn:
1359 tsif_put_clocks(tsif_device);
1360err_clocks:
1361 kfree(tsif_device);
1362out:
1363 return rc;
1364}
1365
/*
 * Unbind one TSIF device: unregister it from the global list, then release
 * everything probe acquired, in reverse order (wakelock, sysfs, IRQ,
 * debugfs, DMA, GPIOs, iomap, clocks, runtime PM, memory).
 *
 * NOTE(review): pm_runtime_put() here has no matching pm_runtime_get() in
 * probe (probe only does set_active + enable; the get/put pair lives in
 * action_open/action_close) — confirm the intended runtime-PM balance.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1385
/*
 * Runtime-PM suspend hook: no device-specific work — clock/DMA gating is
 * handled by action_open()/action_close(); this only logs the transition.
 */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1391
/*
 * Runtime-PM resume hook: counterpart of tsif_runtime_suspend(); nothing
 * to restore here, only a debug trace.
 */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1397
/* runtime-PM callbacks wired into the platform driver below */
static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1402
1403
1404static struct platform_driver msm_tsif_driver = {
1405 .probe = msm_tsif_probe,
1406 .remove = __exit_p(msm_tsif_remove),
1407 .driver = {
1408 .name = "msm_tsif",
1409 .pm = &tsif_dev_pm_ops,
1410 },
1411};
1412
1413static int __init mod_init(void)
1414{
1415 int rc = platform_driver_register(&msm_tsif_driver);
1416 if (rc)
1417 pr_err("TSIF: platform_driver_register failed: %d\n", rc);
1418 return rc;
1419}
1420
/*
 * Module exit point: unregister the platform driver; the core then calls
 * msm_tsif_remove() for each bound device.
 */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
1425/* ===module end=== */
1426
1427/* public API */
1428
1429void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
1430{
1431 struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
1432 if (tsif_device->client_notify || tsif_device->client_data)
1433 return ERR_PTR(-EBUSY);
1434 tsif_device->client_notify = notify;
1435 tsif_device->client_data = data;
1436 /* prevent from unloading */
1437 get_device(&tsif_device->pdev->dev);
1438 return tsif_device;
1439}
1440EXPORT_SYMBOL(tsif_attach);
1441
1442void tsif_detach(void *cookie)
1443{
1444 struct msm_tsif_device *tsif_device = cookie;
1445 tsif_device->client_notify = NULL;
1446 tsif_device->client_data = NULL;
1447 put_device(&tsif_device->pdev->dev);
1448}
1449EXPORT_SYMBOL(tsif_detach);
1450
1451void tsif_get_info(void *cookie, void **pdata, int *psize)
1452{
1453 struct msm_tsif_device *tsif_device = cookie;
1454 if (pdata)
1455 *pdata = tsif_device->data_buffer;
1456 if (psize)
1457 *psize = TSIF_PKTS_IN_BUF;
1458}
1459EXPORT_SYMBOL(tsif_get_info);
1460
1461int tsif_set_mode(void *cookie, int mode)
1462{
1463 struct msm_tsif_device *tsif_device = cookie;
1464 if (tsif_device->state != tsif_state_stopped) {
1465 dev_err(&tsif_device->pdev->dev,
1466 "Can't change mode while device is active\n");
1467 return -EBUSY;
1468 }
1469 switch (mode) {
1470 case 1:
1471 case 2:
1472 case 3:
1473 tsif_device->mode = mode;
1474 break;
1475 default:
1476 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1477 return -EINVAL;
1478 }
1479 return 0;
1480}
1481EXPORT_SYMBOL(tsif_set_mode);
1482
1483int tsif_set_time_limit(void *cookie, u32 value)
1484{
1485 struct msm_tsif_device *tsif_device = cookie;
1486 if (tsif_device->state != tsif_state_stopped) {
1487 dev_err(&tsif_device->pdev->dev,
1488 "Can't change time limit while device is active\n");
1489 return -EBUSY;
1490 }
1491 if (value != (value & 0xFFFFFF)) {
1492 dev_err(&tsif_device->pdev->dev,
1493 "Invalid time limit (should be 24 bit): %#x\n", value);
1494 return -EINVAL;
1495 }
1496 tsif_device->time_limit = value;
1497 return 0;
1498}
1499EXPORT_SYMBOL(tsif_set_time_limit);
1500
1501int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1502{
1503 struct msm_tsif_device *tsif_device = cookie;
1504 if (tsif_device->data_buffer) {
1505 dev_err(&tsif_device->pdev->dev,
1506 "Data buffer already allocated: %p\n",
1507 tsif_device->data_buffer);
1508 return -EBUSY;
1509 }
1510 /* check for crazy user */
1511 if (pkts_in_chunk * chunks_in_buf > 10240) {
1512 dev_err(&tsif_device->pdev->dev,
1513 "Buffer requested is too large: %d * %d\n",
1514 pkts_in_chunk,
1515 chunks_in_buf);
1516 return -EINVAL;
1517 }
1518 /* parameters are OK, execute */
1519 tsif_device->pkts_per_chunk = pkts_in_chunk;
1520 tsif_device->chunks_per_buf = chunks_in_buf;
1521 return 0;
1522}
1523EXPORT_SYMBOL(tsif_set_buf_config);
1524
1525void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
1526{
1527 struct msm_tsif_device *tsif_device = cookie;
1528 if (ri)
1529 *ri = tsif_device->ri;
1530 if (wi)
1531 *wi = tsif_device->wi;
1532 if (state)
1533 *state = tsif_device->state;
1534}
1535EXPORT_SYMBOL(tsif_get_state);
1536
/*
 * Public start entry point: delegates to the common open path shared
 * with the debugfs "action" file.
 */
int tsif_start(void *cookie)
{
	return action_open(cookie);
}
EXPORT_SYMBOL(tsif_start);
1543
/*
 * Public stop entry point: delegates to the common close path shared
 * with the debugfs "action" file; its return value is intentionally
 * dropped (action_close() always returns 0).
 */
void tsif_stop(void *cookie)
{
	action_close(cookie);
}
EXPORT_SYMBOL(tsif_stop);
1550
1551void tsif_reclaim_packets(void *cookie, int read_index)
1552{
1553 struct msm_tsif_device *tsif_device = cookie;
1554 tsif_device->ri = read_index;
1555}
1556EXPORT_SYMBOL(tsif_reclaim_packets);
1557
/* module entry/exit registration and standard metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1564