/*
2 * TSIF Driver
3 *
4 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
34
35#include <mach/gpio.h>
36#include <mach/dma.h>
37#include <mach/msm_tsif.h>
38
39/*
40 * TSIF register offsets
41 */
42#define TSIF_STS_CTL_OFF (0x0)
43#define TSIF_TIME_LIMIT_OFF (0x4)
44#define TSIF_CLK_REF_OFF (0x8)
45#define TSIF_LPBK_FLAGS_OFF (0xc)
46#define TSIF_LPBK_DATA_OFF (0x10)
47#define TSIF_TEST_CTL_OFF (0x14)
48#define TSIF_TEST_MODE_OFF (0x18)
49#define TSIF_TEST_RESET_OFF (0x1c)
50#define TSIF_TEST_EXPORT_OFF (0x20)
51#define TSIF_TEST_CURRENT_OFF (0x24)
52
53#define TSIF_DATA_PORT_OFF (0x100)
54
55/* bits for TSIF_STS_CTL register */
56#define TSIF_STS_CTL_EN_IRQ (1 << 28)
57#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
58#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
59#define TSIF_STS_CTL_OVERFLOW (1 << 25)
60#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
61#define TSIF_STS_CTL_TIMEOUT (1 << 23)
62#define TSIF_STS_CTL_INV_SYNC (1 << 21)
63#define TSIF_STS_CTL_INV_NULL (1 << 20)
64#define TSIF_STS_CTL_INV_ERROR (1 << 19)
65#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
66#define TSIF_STS_CTL_INV_DATA (1 << 17)
67#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
68#define TSIF_STS_CTL_SPARE (1 << 15)
69#define TSIF_STS_CTL_EN_NULL (1 << 11)
70#define TSIF_STS_CTL_EN_ERROR (1 << 10)
71#define TSIF_STS_CTL_LAST_BIT (1 << 9)
72#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
73#define TSIF_STS_CTL_EN_TCR (1 << 7)
74#define TSIF_STS_CTL_TEST_MODE (3 << 5)
75#define TSIF_STS_CTL_EN_DM (1 << 4)
76#define TSIF_STS_CTL_STOP (1 << 3)
77#define TSIF_STS_CTL_START (1 << 0)
78
79/*
80 * Data buffering parameters
81 *
82 * Data stored in cyclic buffer;
83 *
84 * Data organized in chunks of packets.
85 * One chunk processed at a time by the data mover
86 *
87 */
88#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
89#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
90#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
91#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
92#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
93#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
#define TSIF_MAX_ID 1

96#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
97#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
98#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
99#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
100#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
101
102/* used to create debugfs entries */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* file permission bits */
	int offset;		/* TSIF register offset from the mapped base */
} debugfs_tsif_regs[] = {
	{"sts_ctl", S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit", S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref", S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags", S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data", S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl", S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode", S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset", S_IWUSR, TSIF_TEST_RESET_OFF},		/* write-only */
	{"test_export", S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO, TSIF_TEST_CURRENT_OFF},	/* read-only */
	{"data_port", S_IRUSR, TSIF_DATA_PORT_OFF},		/* owner read-only */
};
120
121/* structures for Data Mover */
/*
 * One Data Mover command allocation: a box-mode descriptor plus the
 * command-pointer-list entry that refers to it (see tsif_dma_init()).
 */
struct tsif_dmov_cmd {
	dmov_box box;		/* box-mode transfer descriptor */
	dma_addr_t box_ptr;	/* CMD_PTR_LP entry pointing at @box */
};
126
127struct msm_tsif_device;
128
/*
 * Per-transfer context; recovered from the DM command header with
 * container_of() in tsif_dmov_complete_func().
 */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;		/* Data Mover command header */
	struct msm_tsif_device *tsif_device;	/* owning device */
	int busy;	/* non-zero while this xfer is queued on the DM */
	int wi; /**< set device's write index after xfer */
};
135
/* Driver state for one TSIF instance. */
struct msm_tsif_device {
	struct list_head devlist;	/* link in the driver's device list */
	struct platform_device *pdev;
	struct resource *memres;	/* register window resource */
	void __iomem *base;		/* mapped TSIF registers */
	unsigned int irq;
	int mode;			/* 1, 2, or 3 = manual/debugfs (see tsif_start_hw) */
	u32 time_limit;			/* written to TSIF_TIME_LIMIT_OFF */
	enum tsif_state state;
	struct wake_lock wake_lock;	/* held between action_open/action_close */
	/* clocks (any handle may be NULL if not named in platform data) */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf;
	/* DMA related */
	int dma;			/* Data Mover channel id */
	int crci;			/* CRCI used in the DM box command */
	void *data_buffer;		/* cyclic packet buffer (coherent) */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;		/* TSIF_PKTS_IN_CHUNK */
	u32 chunks_per_buf;		/* TSIF_CHUNKS_IN_BUF */
	int ri;				/* read index - first readable packet */
	int wi;				/* write index - next packet DM will complete */
	int dmwi; /**< DataMover write index - next packet to schedule */
	struct tsif_dmov_cmd *dmov_cmd[2];	/* two in-flight DM commands */
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* reschedules DM after completion */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* packet status words used for IFI calc (debug) */
	/* client */
	void *client_data;
	void (*client_notify)(void *client_data);
};
185
186/* ===clocks begin=== */
187
188static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
189{
190 if (tsif_device->tsif_clk) {
191 clk_put(tsif_device->tsif_clk);
192 tsif_device->tsif_clk = NULL;
193 }
194 if (tsif_device->tsif_pclk) {
195 clk_put(tsif_device->tsif_pclk);
196 tsif_device->tsif_pclk = NULL;
197 }
198
199 if (tsif_device->tsif_ref_clk) {
200 clk_put(tsif_device->tsif_ref_clk);
201 tsif_device->tsif_ref_clk = NULL;
202 }
203}
204
205static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
206{
207 struct msm_tsif_platform_data *pdata =
208 tsif_device->pdev->dev.platform_data;
209 int rc = 0;
210
211 if (pdata->tsif_clk) {
212 tsif_device->tsif_clk = clk_get(NULL, pdata->tsif_clk);
213 if (IS_ERR(tsif_device->tsif_clk)) {
214 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
215 pdata->tsif_clk);
216 rc = PTR_ERR(tsif_device->tsif_clk);
217 tsif_device->tsif_clk = NULL;
218 goto ret;
219 }
220 }
221 if (pdata->tsif_pclk) {
222 tsif_device->tsif_pclk = clk_get(NULL, pdata->tsif_pclk);
223 if (IS_ERR(tsif_device->tsif_pclk)) {
224 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
225 pdata->tsif_pclk);
226 rc = PTR_ERR(tsif_device->tsif_pclk);
227 tsif_device->tsif_pclk = NULL;
228 goto ret;
229 }
230 }
231 if (pdata->tsif_ref_clk) {
232 tsif_device->tsif_ref_clk = clk_get(NULL, pdata->tsif_ref_clk);
233 if (IS_ERR(tsif_device->tsif_ref_clk)) {
234 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
235 pdata->tsif_ref_clk);
236 rc = PTR_ERR(tsif_device->tsif_ref_clk);
237 tsif_device->tsif_ref_clk = NULL;
238 goto ret;
239 }
240 }
241 return 0;
242ret:
243 tsif_put_clocks(tsif_device);
244 return rc;
245}
246
247static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
248{
249 if (on) {
250 if (tsif_device->tsif_clk)
251 clk_enable(tsif_device->tsif_clk);
252 if (tsif_device->tsif_pclk)
253 clk_enable(tsif_device->tsif_pclk);
254 clk_enable(tsif_device->tsif_ref_clk);
255 } else {
256 if (tsif_device->tsif_clk)
257 clk_disable(tsif_device->tsif_clk);
258 if (tsif_device->tsif_pclk)
259 clk_disable(tsif_device->tsif_pclk);
260 clk_disable(tsif_device->tsif_ref_clk);
261 }
262}
263/* ===clocks end=== */
264/* ===gpio begin=== */
265
266static void tsif_gpios_free(const struct msm_gpio *table, int size)
267{
268 int i;
269 const struct msm_gpio *g;
270 for (i = size-1; i >= 0; i--) {
271 g = table + i;
272 gpio_free(GPIO_PIN(g->gpio_cfg));
273 }
274}
275
276static int tsif_gpios_request(const struct msm_gpio *table, int size)
277{
278 int rc;
279 int i;
280 const struct msm_gpio *g;
281 for (i = 0; i < size; i++) {
282 g = table + i;
283 rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
284 if (rc) {
285 pr_err("gpio_request(%d) <%s> failed: %d\n",
286 GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
287 goto err;
288 }
289 }
290 return 0;
291err:
292 tsif_gpios_free(table, i);
293 return rc;
294}
295
296static int tsif_gpios_disable(const struct msm_gpio *table, int size)
297{
298 int rc = 0;
299 int i;
300 const struct msm_gpio *g;
301 for (i = size-1; i >= 0; i--) {
302 int tmp;
303 g = table + i;
304 tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
305 if (tmp) {
306 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
307 " <%s> failed: %d\n",
308 g->gpio_cfg, g->label ?: "?", rc);
309 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
310 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
311 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
312 GPIO_DRVSTR(g->gpio_cfg));
313 if (!rc)
314 rc = tmp;
315 }
316 }
317
318 return rc;
319}
320
321static int tsif_gpios_enable(const struct msm_gpio *table, int size)
322{
323 int rc;
324 int i;
325 const struct msm_gpio *g;
326 for (i = 0; i < size; i++) {
327 g = table + i;
328 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
329 if (rc) {
330 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
331 " <%s> failed: %d\n",
332 g->gpio_cfg, g->label ?: "?", rc);
333 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
334 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
335 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
336 GPIO_DRVSTR(g->gpio_cfg));
337 goto err;
338 }
339 }
340 return 0;
341err:
342 tsif_gpios_disable(table, i);
343 return rc;
344}
345
/* Request and enable all GPIOs; on enable failure the pins are freed. */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	int rc = tsif_gpios_request(table, size);

	if (!rc) {
		rc = tsif_gpios_enable(table, size);
		if (rc)
			tsif_gpios_free(table, size);
	}
	return rc;
}
356
/* Disable and release all GPIOs; reverse of tsif_gpios_request_enable(). */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
	tsif_gpios_free(table, size);
}
362
/* Claim and enable the TSIF pins listed in the platform data. */
static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
{
	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
}
369
/* Disable and release the TSIF pins listed in the platform data. */
static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
{
	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
}
376
377/* ===gpio end=== */
378
379static int tsif_start_hw(struct msm_tsif_device *tsif_device)
380{
381 u32 ctl = TSIF_STS_CTL_EN_IRQ |
382 TSIF_STS_CTL_EN_TIME_LIM |
383 TSIF_STS_CTL_EN_TCR |
384 TSIF_STS_CTL_EN_DM;
385 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
386 switch (tsif_device->mode) {
387 case 1: /* mode 1 */
388 ctl |= (0 << 5);
389 break;
390 case 2: /* mode 2 */
391 ctl |= (1 << 5);
392 break;
393 case 3: /* manual - control from debugfs */
394 return 0;
395 break;
396 default:
397 return -EINVAL;
398 }
399 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
400 iowrite32(tsif_device->time_limit,
401 tsif_device->base + TSIF_TIME_LIMIT_OFF);
402 wmb();
403 iowrite32(ctl | TSIF_STS_CTL_START,
404 tsif_device->base + TSIF_STS_CTL_OFF);
405 wmb();
406 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
407 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
408}
409
/* Ask the TSIF hardware to stop streaming. */
static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
{
	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb();	/* make sure the stop command is posted before returning */
}
415
416/* ===DMA begin=== */
417/**
418 * TSIF DMA theory of operation
419 *
420 * Circular memory buffer \a tsif_mem_buffer allocated;
421 * 4 pointers points to and moved forward on:
422 * - \a ri index of first ready to read packet.
423 * Updated by client's call to tsif_reclaim_packets()
424 * - \a wi points to the next packet to be written by DM.
425 * Data below is valid and will not be overriden by DMA.
426 * Moved on DM callback
427 * - \a dmwi points to the next packet not scheduled yet for DM
428 * moved when packet scheduled for DM
429 *
430 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
431 * at time immediately after scheduling.
432 *
433 * Initially, 2 packets get scheduled for the DM.
434 *
435 * Upon packet receive, DM writes packet to the pre-programmed
436 * location and invoke its callback.
437 *
438 * DM callback moves sets wi pointer to \a xfer->wi;
439 * then it schedules next packet for DM and moves \a dmwi pointer.
440 *
441 * Buffer overflow handling
442 *
443 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
444 * DMA re-scheduled to the same index.
445 * Callback check and not move \a wi to become equal to \a ri
446 *
447 * On \a read request, data between \a ri and \a wi pointers may be read;
448 * \ri pointer moved accordingly.
449 *
450 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
451 * \a wi is between [\a ri, \a dmwi]
452 *
453 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
454 *
455 * Number of scheduled packets for DM: (dmwi-wi)
456 */
457
458/**
459 * tsif_dma_schedule - schedule DMA transfers
460 *
461 * @tsif_device: device
462 *
463 * Executed from process context on init, or from tasklet when
464 * re-scheduling upon DMA completion.
465 * This prevent concurrent execution from several CPU's
466 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* point the box destination at the next free chunk */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi is going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt the TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, the packet will be dropped
		 * (see "soft drop" in tsif_dmov_complete_func()).
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* remember where wi should land when this xfer completes */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
510
511/**
512 * tsif_dmov_complete_func - DataMover completion callback
513 *
514 * @cmd: original DM command
515 * @result: DM result
516 * @err: optional error buffer
517 *
518 * Executed in IRQ context (Data Mover's IRQ)
519 * DataMover's spinlock @msm_dmov_lock held.
520 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;	/* which of the two xfer slots */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				  TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * Clocks _may_ be stopped right from IRQ
				 * context. This is far from optimal w.r.t
				 * latency.
				 *
				 * But, this branch taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happen for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 */
				tsif_clock(tsif_device, 0);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				/* stopped once both slots have drained */
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA here -
	 * DataMover driver still holds its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
650
651/**
652 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
653 *
654 * @data: tsif_device
655 *
656 * Reschedule DMA requests
657 *
658 * Executed in tasklet
659 */
660static void tsif_dma_refill(unsigned long data)
661{
662 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
663 if (tsif_device->state == tsif_state_running)
664 tsif_dma_schedule(tsif_device);
665}
666
667/**
668 * tsif_dma_flush - flush DMA channel
669 *
670 * @tsif_device:
671 *
672 * busy wait till DMA flushed
673 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		/* busy flags are cleared by tsif_dmov_complete_func() */
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			msm_dmov_flush(tsif_device->dma);
			msleep(10);	/* poll until both slots drain */
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
688
/* Flush outstanding transfers and release all DMA resources. */
static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
{
	int i;
	tsif_device->state = tsif_state_flushing;
	tasklet_kill(&tsif_device->dma_refill);	/* no more refills */
	tsif_dma_flush(tsif_device);
	for (i = 0; i < 2; i++) {
		if (tsif_device->dmov_cmd[i]) {
			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
					  tsif_device->dmov_cmd[i],
					  tsif_device->dmov_cmd_dma[i]);
			tsif_device->dmov_cmd[i] = NULL;
		}
	}
	if (tsif_device->data_buffer) {
		/* detach the debugfs blob before freeing its backing store */
		tsif_device->blob_wrapper_databuf.data = NULL;
		tsif_device->blob_wrapper_databuf.size = 0;
		dma_free_coherent(NULL, TSIF_BUF_SIZE,
				  tsif_device->data_buffer,
				  tsif_device->data_buffer_dma);
		tsif_device->data_buffer = NULL;
	}
}
712
/*
 * Allocate the cyclic data buffer and the two Data Mover command
 * blocks, and pre-program the box descriptors (source: TSIF data port
 * with CRCI flow control; destination: re-pointed per chunk in
 * tsif_dma_schedule()).  Returns 0 or -ENOMEM.
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it requires coherent_dma_mask in device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	/* NOTE(review): %08x assumes a 32-bit dma_addr_t - verify on LPAE */
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		/* command-pointer-list entry referring to the box above */
		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
			      offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	msm_dmov_flush(tsif_device->dma);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
767
768/* ===DMA end=== */
769
770/* ===IRQ begin=== */
771
772static irqreturn_t tsif_irq(int irq, void *dev_id)
773{
774 struct msm_tsif_device *tsif_device = dev_id;
775 u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
776 if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
777 TSIF_STS_CTL_OVERFLOW |
778 TSIF_STS_CTL_LOST_SYNC |
779 TSIF_STS_CTL_TIMEOUT))) {
780 dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
781 return IRQ_NONE;
782 }
783 if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
784 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
785 tsif_device->stat_rx++;
786 }
787 if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
788 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
789 tsif_device->stat_overflow++;
790 }
791 if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
792 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
793 tsif_device->stat_lost_sync++;
794 }
795 if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
796 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
797 tsif_device->stat_timeout++;
798 }
799 iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
800 wmb();
801 return IRQ_HANDLED;
802}
803
804/* ===IRQ end=== */
805
806/* ===Device attributes begin=== */
807
/* sysfs "stats" read handler: dump state, configuration, counters and
 * a few clock-control registers for debugging.
 */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
873/**
874 * set_stats - reset statistics on write
875 *
876 * @dev:
877 * @attr:
878 * @buf:
879 * @count:
880 */
881static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
882 const char *buf, size_t count)
883{
884 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
885 tsif_device->stat_rx = 0;
886 tsif_device->stat_overflow = 0;
887 tsif_device->stat_lost_sync = 0;
888 tsif_device->stat_timeout = 0;
889 tsif_device->stat_dmov_err = 0;
890 tsif_device->stat_soft_drop = 0;
891 tsif_device->stat_ifi = 0;
892 return count;
893}
894static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
895
/* sysfs "mode" read handler. */
static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
}
902
903static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
904 const char *buf, size_t count)
905{
906 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
907 int value;
908 int rc;
909 if (1 != sscanf(buf, "%d", &value)) {
910 dev_err(&tsif_device->pdev->dev,
911 "Failed to parse integer: <%s>\n", buf);
912 return -EINVAL;
913 }
914 rc = tsif_set_mode(tsif_device, value);
915 if (!rc)
916 rc = count;
917 return rc;
918}
919static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
920
/* sysfs "time_limit" read handler. */
static ssize_t show_time_limit(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
}
928
929static ssize_t set_time_limit(struct device *dev,
930 struct device_attribute *attr,
931 const char *buf, size_t count)
932{
933 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
934 int value;
935 int rc;
936 if (1 != sscanf(buf, "%d", &value)) {
937 dev_err(&tsif_device->pdev->dev,
938 "Failed to parse integer: <%s>\n", buf);
939 return -EINVAL;
940 }
941 rc = tsif_set_time_limit(tsif_device, value);
942 if (!rc)
943 rc = count;
944 return rc;
945}
946static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
947 show_time_limit, set_time_limit);
948
/* sysfs "buf_config" read handler: "pkts_per_chunk * chunks_per_buf". */
static ssize_t show_buf_config(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d * %d\n",
			tsif_device->pkts_per_chunk,
			tsif_device->chunks_per_buf);
}
958
959static ssize_t set_buf_config(struct device *dev,
960 struct device_attribute *attr,
961 const char *buf, size_t count)
962{
963 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
964 u32 p, c;
965 int rc;
966 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
967 dev_err(&tsif_device->pdev->dev,
968 "Failed to parse integer: <%s>\n", buf);
969 return -EINVAL;
970 }
971 rc = tsif_set_buf_config(tsif_device, p, c);
972 if (!rc)
973 rc = count;
974 return rc;
975}
976static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
977 show_buf_config, set_buf_config);
978
/* sysfs attributes exported for the platform device */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	NULL,
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
989/* ===Device attributes end=== */
990
991/* ===debugfs begin=== */
992
/* debugfs write handler: store @val into the mapped register @data. */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb();	/* post the write before returning to the caller */
	return 0;
}
999
/* debugfs read handler: fetch the mapped register @data into *@val. */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}

/* hex 32-bit accessor used for all entries in debugfs_tsif_regs[] */
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");
1008
/* Create a debugfs file exposing one 32-bit iomem register through
 * fops_iomem_x32 (hex read/write).
 */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}
1014
/*
 * Open the device: allocate/schedule DMA, enable IRQ and clocks, run
 * the optional platform init hook, start the hardware, wake the device
 * via runtime PM and take the wakelock.  Returns 0 or a negative error.
 */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;	/* overwritten before first use */
	int result;

	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	tsif_device->state = tsif_state_running;
	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		return rc;
	}

	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		/* NOTE(review): returns with HW/DMA/clocks still running
		 * and no wakelock - confirm this partial-failure state is
		 * intended, or mirror the tsif_start_hw() error path.
		 */
		return result;
	}

	wake_lock(&tsif_device->wake_lock);
	return rc;
}
1062
/*
 * Close the device: stop hardware, drain/release DMA, gate clocks,
 * disable the IRQ, drop the runtime-PM reference and the wakelock.
 * Always returns 0.
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);
	/*
	 * DMA should be flushed/stopped prior to TSIF hardware stop,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1080
1081
1082static struct {
1083 int (*func)(struct msm_tsif_device *);
1084 const char *name;
1085} actions[] = {
1086 { action_open, "open"},
1087 { action_close, "close"},
1088};
1089
1090static ssize_t tsif_debugfs_action_write(struct file *filp,
1091 const char __user *userbuf,
1092 size_t count, loff_t *f_pos)
1093{
1094 int i;
1095 struct msm_tsif_device *tsif_device = filp->private_data;
1096 char s[40];
1097 int len = min(sizeof(s) - 1, count);
1098 if (copy_from_user(s, userbuf, len))
1099 return -EFAULT;
1100 s[len] = '\0';
1101 dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
1102 for (i = 0; i < ARRAY_SIZE(actions); i++) {
1103 if (!strncmp(s, actions[i].name,
1104 min(count, strlen(actions[i].name)))) {
1105 int rc = actions[i].func(tsif_device);
1106 if (!rc)
1107 rc = count;
1108 return rc;
1109 }
1110 }
1111 return -EINVAL;
1112}
1113
/* Common open: stash the inode's private pointer for read/write handlers. */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
1119
/* "action" debugfs file: write-only command interface ("open"/"close") */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1124
1125static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1126 size_t count, loff_t *f_pos)
1127{
1128 static char bufa[200];
1129 static char *buf = bufa;
1130 int sz = sizeof(bufa);
1131 struct msm_tsif_device *tsif_device = filp->private_data;
1132 int len = 0;
1133 if (tsif_device) {
1134 int i;
1135 len += snprintf(buf + len, sz - len,
1136 "ri %3d | wi %3d | dmwi %3d |",
1137 tsif_device->ri, tsif_device->wi,
1138 tsif_device->dmwi);
1139 for (i = 0; i < 2; i++) {
1140 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1141 if (xfer->busy) {
1142 u32 dst =
1143 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1144 u32 base = tsif_device->data_buffer_dma;
1145 int w = (dst - base) / TSIF_PKT_SIZE;
1146 len += snprintf(buf + len, sz - len,
1147 " [%3d]{%3d}",
1148 w, xfer->wi);
1149 } else {
1150 len += snprintf(buf + len, sz - len,
1151 " ---idle---");
1152 }
1153 }
1154 len += snprintf(buf + len, sz - len, "\n");
1155 } else {
1156 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1157 }
1158 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1159}
1160
/* "dma" debugfs file: read-only snapshot of DMA indices/transfers */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1165
1166static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1167 size_t count, loff_t *f_pos)
1168{
1169 static char bufa[300];
1170 static char *buf = bufa;
1171 int sz = sizeof(bufa);
1172 struct msm_tsif_device *tsif_device = filp->private_data;
1173 int len = 0;
1174 if (tsif_device) {
1175 struct msm_tsif_platform_data *pdata =
1176 tsif_device->pdev->dev.platform_data;
1177 int i;
1178 for (i = 0; i < pdata->num_gpios; i++) {
1179 if (pdata->gpios[i].gpio_cfg) {
1180 int x = !!gpio_get_value(GPIO_PIN(
1181 pdata->gpios[i].gpio_cfg));
1182 len += snprintf(buf + len, sz - len,
1183 "%15s: %d\n",
1184 pdata->gpios[i].label, x);
1185 }
1186 }
1187 } else {
1188 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1189 }
1190 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1191}
1192
/* "gpios" debugfs file: read-only dump of TSIF GPIO levels */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1197
1198
1199static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
1200{
1201 tsif_device->dent_tsif = debugfs_create_dir(
1202 dev_name(&tsif_device->pdev->dev), NULL);
1203 if (tsif_device->dent_tsif) {
1204 int i;
1205 void __iomem *base = tsif_device->base;
1206 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
1207 tsif_device->debugfs_tsif_regs[i] =
1208 debugfs_create_iomem_x32(
1209 debugfs_tsif_regs[i].name,
1210 debugfs_tsif_regs[i].mode,
1211 tsif_device->dent_tsif,
1212 base + debugfs_tsif_regs[i].offset);
1213 }
1214 tsif_device->debugfs_gpio = debugfs_create_file("gpios",
1215 S_IRUGO,
1216 tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
1217 tsif_device->debugfs_action = debugfs_create_file("action",
1218 S_IWUSR,
1219 tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
1220 tsif_device->debugfs_dma = debugfs_create_file("dma",
1221 S_IRUGO,
1222 tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
1223 tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
1224 S_IRUGO,
1225 tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
1226 }
1227}
1228
1229static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1230{
1231 if (tsif_device->dent_tsif) {
1232 int i;
1233 debugfs_remove_recursive(tsif_device->dent_tsif);
1234 tsif_device->dent_tsif = NULL;
1235 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1236 tsif_device->debugfs_tsif_regs[i] = NULL;
1237 tsif_device->debugfs_gpio = NULL;
1238 tsif_device->debugfs_action = NULL;
1239 tsif_device->debugfs_dma = NULL;
1240 tsif_device->debugfs_databuf = NULL;
1241 }
1242}
1243/* ===debugfs end=== */
1244
/* ===module begin=== */
/* All probed TSIF devices; probe adds at the head via list_add() */
static LIST_HEAD(tsif_devices);
1247
1248static struct msm_tsif_device *tsif_find_by_id(int id)
1249{
1250 struct msm_tsif_device *tsif_device;
1251 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1252 if (tsif_device->pdev->id == id)
1253 return tsif_device;
1254 }
1255 return NULL;
1256}
1257
1258static int __devinit msm_tsif_probe(struct platform_device *pdev)
1259{
1260 int rc = -ENODEV;
1261 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1262 struct msm_tsif_device *tsif_device;
1263 struct resource *res;
1264 /* check device validity */
1265 /* must have platform data */
1266 if (!plat) {
1267 dev_err(&pdev->dev, "Platform data not available\n");
1268 rc = -EINVAL;
1269 goto out;
1270 }
Joel Nider5578bdb2011-08-12 09:37:11 +03001271
1272 if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001273 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1274 rc = -EINVAL;
1275 goto out;
1276 }
1277 /* OK, we will use this device */
1278 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1279 if (!tsif_device) {
1280 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1281 rc = -ENOMEM;
1282 goto out;
1283 }
1284 /* cross links */
1285 tsif_device->pdev = pdev;
1286 platform_set_drvdata(pdev, tsif_device);
1287 tsif_device->mode = 1;
1288 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1289 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1290 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1291 (unsigned long)tsif_device);
1292 if (tsif_get_clocks(tsif_device))
1293 goto err_clocks;
1294/* map I/O memory */
1295 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1296 if (!tsif_device->memres) {
1297 dev_err(&pdev->dev, "Missing MEM resource\n");
1298 rc = -ENXIO;
1299 goto err_rgn;
1300 }
1301 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1302 if (!res) {
1303 dev_err(&pdev->dev, "Missing DMA resource\n");
1304 rc = -ENXIO;
1305 goto err_rgn;
1306 }
1307 tsif_device->dma = res->start;
1308 tsif_device->crci = res->end;
1309 tsif_device->base = ioremap(tsif_device->memres->start,
1310 resource_size(tsif_device->memres));
1311 if (!tsif_device->base) {
1312 dev_err(&pdev->dev, "ioremap failed\n");
1313 goto err_ioremap;
1314 }
1315 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1316 tsif_device->memres->start, tsif_device->base);
1317 rc = tsif_start_gpios(tsif_device);
1318 if (rc)
1319 goto err_gpio;
1320
1321 pm_runtime_set_active(&pdev->dev);
1322 pm_runtime_enable(&pdev->dev);
1323
1324 tsif_debugfs_init(tsif_device);
1325 rc = platform_get_irq(pdev, 0);
1326 if (rc > 0) {
1327 tsif_device->irq = rc;
1328 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1329 dev_name(&pdev->dev), tsif_device);
1330 disable_irq(tsif_device->irq);
1331 }
1332 if (rc) {
1333 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1334 tsif_device->irq, rc);
1335 goto err_irq;
1336 }
1337 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1338 if (rc) {
1339 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1340 goto err_attrs;
1341 }
1342 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1343 dev_name(&pdev->dev));
1344 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1345 tsif_device->irq, tsif_device->memres->start,
1346 tsif_device->dma, tsif_device->crci);
1347 list_add(&tsif_device->devlist, &tsif_devices);
1348 return 0;
1349/* error path */
1350 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1351err_attrs:
1352 free_irq(tsif_device->irq, tsif_device);
1353err_irq:
1354 tsif_debugfs_exit(tsif_device);
1355 tsif_stop_gpios(tsif_device);
1356err_gpio:
1357 iounmap(tsif_device->base);
1358err_ioremap:
1359err_rgn:
1360 tsif_put_clocks(tsif_device);
1361err_clocks:
1362 kfree(tsif_device);
1363out:
1364 return rc;
1365}
1366
/*
 * msm_tsif_remove() - device unbind: undo msm_tsif_probe() in reverse.
 * Teardown order matters: sysfs/IRQ/debugfs first, then DMA, then the
 * register mapping and clocks.
 * NOTE(review): assumes the client has already detached/stopped;
 * tsif_dma_exit() here acts as a safety net if it has not - confirm.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1386
/* Runtime-PM suspend callback - currently a no-op that only logs. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
1392
/* Runtime-PM resume callback - currently a no-op that only logs. */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
1398
/* PM operations: only the runtime suspend/resume hooks are provided. */
static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1403
1404
1405static struct platform_driver msm_tsif_driver = {
1406 .probe = msm_tsif_probe,
1407 .remove = __exit_p(msm_tsif_remove),
1408 .driver = {
1409 .name = "msm_tsif",
1410 .pm = &tsif_dev_pm_ops,
1411 },
1412};
1413
1414static int __init mod_init(void)
1415{
1416 int rc = platform_driver_register(&msm_tsif_driver);
1417 if (rc)
1418 pr_err("TSIF: platform_driver_register failed: %d\n", rc);
1419 return rc;
1420}
1421
/* Module exit point: unregister the TSIF platform driver. */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
/* ===module end=== */
1426/* ===module end=== */
1427
1428/* public API */
1429
Joel Nider5578bdb2011-08-12 09:37:11 +03001430int tsif_get_active(void)
1431{
1432 struct msm_tsif_device *tsif_device;
1433 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1434 return tsif_device->pdev->id;
1435 }
1436 return -ENODEV;
1437}
1438EXPORT_SYMBOL(tsif_get_active);
1439
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001440void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
1441{
1442 struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
Joel Nider5578bdb2011-08-12 09:37:11 +03001443 if (!tsif_device)
1444 return ERR_PTR(-ENODEV);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001445 if (tsif_device->client_notify || tsif_device->client_data)
1446 return ERR_PTR(-EBUSY);
1447 tsif_device->client_notify = notify;
1448 tsif_device->client_data = data;
1449 /* prevent from unloading */
1450 get_device(&tsif_device->pdev->dev);
1451 return tsif_device;
1452}
1453EXPORT_SYMBOL(tsif_attach);
1454
/*
 * Detach the client attached via tsif_attach() and drop the device
 * reference taken there.  Does not stop streaming by itself.
 */
void tsif_detach(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->client_notify = NULL;
	tsif_device->client_data = NULL;
	put_device(&tsif_device->pdev->dev);
}
EXPORT_SYMBOL(tsif_detach);
1463
/*
 * Report the data buffer pointer and its size.  Either out-parameter
 * may be NULL.  *psize is TSIF_PKTS_IN_BUF - a packet count, not bytes.
 */
void tsif_get_info(void *cookie, void **pdata, int *psize)
{
	struct msm_tsif_device *tsif_device = cookie;
	if (pdata)
		*pdata = tsif_device->data_buffer;
	if (psize)
		*psize = TSIF_PKTS_IN_BUF;
}
EXPORT_SYMBOL(tsif_get_info);
1473
1474int tsif_set_mode(void *cookie, int mode)
1475{
1476 struct msm_tsif_device *tsif_device = cookie;
1477 if (tsif_device->state != tsif_state_stopped) {
1478 dev_err(&tsif_device->pdev->dev,
1479 "Can't change mode while device is active\n");
1480 return -EBUSY;
1481 }
1482 switch (mode) {
1483 case 1:
1484 case 2:
1485 case 3:
1486 tsif_device->mode = mode;
1487 break;
1488 default:
1489 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1490 return -EINVAL;
1491 }
1492 return 0;
1493}
1494EXPORT_SYMBOL(tsif_set_mode);
1495
1496int tsif_set_time_limit(void *cookie, u32 value)
1497{
1498 struct msm_tsif_device *tsif_device = cookie;
1499 if (tsif_device->state != tsif_state_stopped) {
1500 dev_err(&tsif_device->pdev->dev,
1501 "Can't change time limit while device is active\n");
1502 return -EBUSY;
1503 }
1504 if (value != (value & 0xFFFFFF)) {
1505 dev_err(&tsif_device->pdev->dev,
1506 "Invalid time limit (should be 24 bit): %#x\n", value);
1507 return -EINVAL;
1508 }
1509 tsif_device->time_limit = value;
1510 return 0;
1511}
1512EXPORT_SYMBOL(tsif_set_time_limit);
1513
1514int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1515{
1516 struct msm_tsif_device *tsif_device = cookie;
1517 if (tsif_device->data_buffer) {
1518 dev_err(&tsif_device->pdev->dev,
1519 "Data buffer already allocated: %p\n",
1520 tsif_device->data_buffer);
1521 return -EBUSY;
1522 }
1523 /* check for crazy user */
1524 if (pkts_in_chunk * chunks_in_buf > 10240) {
1525 dev_err(&tsif_device->pdev->dev,
1526 "Buffer requested is too large: %d * %d\n",
1527 pkts_in_chunk,
1528 chunks_in_buf);
1529 return -EINVAL;
1530 }
1531 /* parameters are OK, execute */
1532 tsif_device->pkts_per_chunk = pkts_in_chunk;
1533 tsif_device->chunks_per_buf = chunks_in_buf;
1534 return 0;
1535}
1536EXPORT_SYMBOL(tsif_set_buf_config);
1537
/*
 * Report the current read index, write index and driver state.
 * Any of ri/wi/state may be NULL if the caller is not interested.
 */
void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
{
	struct msm_tsif_device *tsif_device = cookie;
	if (ri)
		*ri = tsif_device->ri;
	if (wi)
		*wi = tsif_device->wi;
	if (state)
		*state = tsif_device->state;
}
EXPORT_SYMBOL(tsif_get_state);
1549
/* Start streaming on the attached device; thin wrapper over action_open(). */
int tsif_start(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	return action_open(tsif_device);
}
EXPORT_SYMBOL(tsif_start);
1556
/* Stop streaming on the attached device; thin wrapper over action_close(). */
void tsif_stop(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	action_close(tsif_device);
}
EXPORT_SYMBOL(tsif_stop);
1563
/*
 * Client reports it has consumed packets up to read_index; the driver
 * only records the new read index here (presumably consulted by the
 * DMA refill logic, which is not visible in this chunk).
 */
void tsif_reclaim_packets(void *cookie, int read_index)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->ri = read_index;
}
EXPORT_SYMBOL(tsif_reclaim_packets);
1570
/* Module registration and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1577