blob: 05f6c86769f8bd6f842e008aed8edcdf2b5ba462 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * TSIF Driver
3 *
Hamad Kadmany68bd27a2013-01-31 14:53:32 +02004 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
Steve Mucklef132c6c2012-06-06 18:30:57 -070034#include <linux/gpio.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <mach/dma.h>
36#include <mach/msm_tsif.h>
37
38/*
39 * TSIF register offsets
40 */
41#define TSIF_STS_CTL_OFF (0x0)
42#define TSIF_TIME_LIMIT_OFF (0x4)
43#define TSIF_CLK_REF_OFF (0x8)
44#define TSIF_LPBK_FLAGS_OFF (0xc)
45#define TSIF_LPBK_DATA_OFF (0x10)
46#define TSIF_TEST_CTL_OFF (0x14)
47#define TSIF_TEST_MODE_OFF (0x18)
48#define TSIF_TEST_RESET_OFF (0x1c)
49#define TSIF_TEST_EXPORT_OFF (0x20)
50#define TSIF_TEST_CURRENT_OFF (0x24)
51
52#define TSIF_DATA_PORT_OFF (0x100)
53
54/* bits for TSIF_STS_CTL register */
55#define TSIF_STS_CTL_EN_IRQ (1 << 28)
56#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
57#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
58#define TSIF_STS_CTL_OVERFLOW (1 << 25)
59#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
60#define TSIF_STS_CTL_TIMEOUT (1 << 23)
61#define TSIF_STS_CTL_INV_SYNC (1 << 21)
62#define TSIF_STS_CTL_INV_NULL (1 << 20)
63#define TSIF_STS_CTL_INV_ERROR (1 << 19)
64#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
65#define TSIF_STS_CTL_INV_DATA (1 << 17)
66#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
67#define TSIF_STS_CTL_SPARE (1 << 15)
68#define TSIF_STS_CTL_EN_NULL (1 << 11)
69#define TSIF_STS_CTL_EN_ERROR (1 << 10)
70#define TSIF_STS_CTL_LAST_BIT (1 << 9)
71#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
72#define TSIF_STS_CTL_EN_TCR (1 << 7)
73#define TSIF_STS_CTL_TEST_MODE (3 << 5)
74#define TSIF_STS_CTL_EN_DM (1 << 4)
75#define TSIF_STS_CTL_STOP (1 << 3)
76#define TSIF_STS_CTL_START (1 << 0)
77
78/*
79 * Data buffering parameters
80 *
81 * Data stored in cyclic buffer;
82 *
83 * Data organized in chunks of packets.
84 * One chunk processed at a time by the data mover
85 *
86 */
87#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
88#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
89#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
90#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
91#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
92#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
Joel Nider5578bdb2011-08-12 09:37:11 +030093#define TSIF_MAX_ID 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070094
95#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
96#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
97#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
98#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
99#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
100
/* used to create debugfs entries */
/*
 * One debugfs file is created per entry; 'mode' is the file's
 * permission bits and 'offset' the register offset from the
 * TSIF base address.
 */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* file permissions */
	int offset;		/* register offset from tsif_device->base */
} debugfs_tsif_regs[] = {
	{"sts_ctl", S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit", S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref", S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags", S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data", S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl", S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode", S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset", S_IWUSR, TSIF_TEST_RESET_OFF},
	{"test_export", S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO, TSIF_TEST_CURRENT_OFF},
	{"data_port", S_IRUSR, TSIF_DATA_PORT_OFF},
};
119
/* structures for Data Mover */

/* One DM command: a box transfer descriptor plus the physical address
 * of that box, used as the command-pointer-list entry. */
struct tsif_dmov_cmd {
	dmov_box box;
	dma_addr_t box_ptr;
};

struct msm_tsif_device;

/* Book-keeping for one of the two outstanding DM transfers. */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;		/* DM command header (completion cb) */
	struct msm_tsif_device *tsif_device;	/* back-pointer to owner */
	int busy;				/* non-zero while owned by the DM */
	int wi; /**< set device's write index after xfer completes */
};
134
/* Per-instance state for one TSIF core. */
struct msm_tsif_device {
	struct list_head devlist;	/* link in the driver's device list */
	struct platform_device *pdev;
	struct resource *memres;	/* register memory resource */
	void __iomem *base;		/* mapped TSIF register window */
	unsigned int irq;
	int mode;			/* 1, 2, or 3 (manual/debugfs control) */
	u32 time_limit;
	int clock_inverse;		/* signal polarity inversion flags */
	int data_inverse;
	int sync_inverse;
	int enable_inverse;
	enum tsif_state state;
	struct wake_lock wake_lock;
	/* clocks (any of these may be NULL if absent from platform data) */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf;
	/* DMA related */
	int dma;			/* DM channel id (passed to msm_dmov_*) */
	int crci;			/* CRCI used in the DM box command */
	void *data_buffer;		/* cyclic packet buffer (DMA coherent) */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;		/* see TSIF_PKTS_IN_CHUNK */
	u32 chunks_per_buf;		/* see TSIF_CHUNKS_IN_BUF */
	int ri;				/* read index: first packet ready to read */
	int wi;				/* write index: next packet DM will complete */
	int dmwi; /**< DataMover write index: next packet not yet scheduled */
	struct tsif_dmov_cmd *dmov_cmd[2];
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* reschedules DM xfers */
	struct tasklet_struct clocks_off;	/* clock teardown outside IRQ */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;
	/* client */
	void *client_data;
	void (*client_notify)(void *client_data);
};
189
190/* ===clocks begin=== */
191
192static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
193{
194 if (tsif_device->tsif_clk) {
195 clk_put(tsif_device->tsif_clk);
196 tsif_device->tsif_clk = NULL;
197 }
198 if (tsif_device->tsif_pclk) {
199 clk_put(tsif_device->tsif_pclk);
200 tsif_device->tsif_pclk = NULL;
201 }
202
203 if (tsif_device->tsif_ref_clk) {
204 clk_put(tsif_device->tsif_ref_clk);
205 tsif_device->tsif_ref_clk = NULL;
206 }
207}
208
209static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
210{
211 struct msm_tsif_platform_data *pdata =
212 tsif_device->pdev->dev.platform_data;
213 int rc = 0;
214
215 if (pdata->tsif_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700216 tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
217 pdata->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218 if (IS_ERR(tsif_device->tsif_clk)) {
219 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
220 pdata->tsif_clk);
221 rc = PTR_ERR(tsif_device->tsif_clk);
222 tsif_device->tsif_clk = NULL;
223 goto ret;
224 }
225 }
226 if (pdata->tsif_pclk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700227 tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
228 pdata->tsif_pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229 if (IS_ERR(tsif_device->tsif_pclk)) {
230 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
231 pdata->tsif_pclk);
232 rc = PTR_ERR(tsif_device->tsif_pclk);
233 tsif_device->tsif_pclk = NULL;
234 goto ret;
235 }
236 }
237 if (pdata->tsif_ref_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700238 tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
239 pdata->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240 if (IS_ERR(tsif_device->tsif_ref_clk)) {
241 dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
242 pdata->tsif_ref_clk);
243 rc = PTR_ERR(tsif_device->tsif_ref_clk);
244 tsif_device->tsif_ref_clk = NULL;
245 goto ret;
246 }
247 }
248 return 0;
249ret:
250 tsif_put_clocks(tsif_device);
251 return rc;
252}
253
254static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
255{
256 if (on) {
257 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300258 clk_prepare_enable(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300260 clk_prepare_enable(tsif_device->tsif_pclk);
261 clk_prepare_enable(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 } else {
263 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300264 clk_disable_unprepare(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300266 clk_disable_unprepare(tsif_device->tsif_pclk);
267 clk_disable_unprepare(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268 }
269}
Joel Nider6682b382012-07-03 13:59:27 +0300270
/* Tasklet body: turn the TSIF clocks off outside interrupt context. */
static void tsif_clocks_off(unsigned long data)
{
	tsif_clock((struct msm_tsif_device *) data, 0);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276/* ===clocks end=== */
277/* ===gpio begin=== */
278
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700279static int tsif_gpios_disable(const struct msm_gpio *table, int size)
280{
281 int rc = 0;
282 int i;
283 const struct msm_gpio *g;
284 for (i = size-1; i >= 0; i--) {
285 int tmp;
286 g = table + i;
Joel Nider951b2832012-05-07 21:13:38 +0300287 tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
288 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
289 GPIO_CFG_DISABLE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700290 if (tmp) {
291 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
292 " <%s> failed: %d\n",
293 g->gpio_cfg, g->label ?: "?", rc);
294 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
295 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
296 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
297 GPIO_DRVSTR(g->gpio_cfg));
298 if (!rc)
299 rc = tmp;
300 }
301 }
302
303 return rc;
304}
305
306static int tsif_gpios_enable(const struct msm_gpio *table, int size)
307{
308 int rc;
309 int i;
310 const struct msm_gpio *g;
311 for (i = 0; i < size; i++) {
312 g = table + i;
313 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
314 if (rc) {
315 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
316 " <%s> failed: %d\n",
317 g->gpio_cfg, g->label ?: "?", rc);
318 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
319 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
320 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
321 GPIO_DRVSTR(g->gpio_cfg));
322 goto err;
323 }
324 }
325 return 0;
326err:
327 tsif_gpios_disable(table, i);
328 return rc;
329}
330
/* Thin wrapper kept for symmetry with tsif_gpios_disable_free(). */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	return tsif_gpios_enable(table, size);
}
337
/* Counterpart of tsif_gpios_request_enable(); release the pins. */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
}
342
343static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
344{
345 struct msm_tsif_platform_data *pdata =
346 tsif_device->pdev->dev.platform_data;
347 return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
348}
349
350static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
351{
352 struct msm_tsif_platform_data *pdata =
353 tsif_device->pdev->dev.platform_data;
354 tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
355}
356
357/* ===gpio end=== */
358
359static int tsif_start_hw(struct msm_tsif_device *tsif_device)
360{
361 u32 ctl = TSIF_STS_CTL_EN_IRQ |
362 TSIF_STS_CTL_EN_TIME_LIM |
363 TSIF_STS_CTL_EN_TCR |
364 TSIF_STS_CTL_EN_DM;
Hamad Kadmany509b7662012-10-18 14:00:39 +0200365
366 if (tsif_device->clock_inverse)
367 ctl |= TSIF_STS_CTL_INV_CLOCK;
368
369 if (tsif_device->data_inverse)
370 ctl |= TSIF_STS_CTL_INV_DATA;
371
372 if (tsif_device->sync_inverse)
373 ctl |= TSIF_STS_CTL_INV_SYNC;
374
375 if (tsif_device->enable_inverse)
376 ctl |= TSIF_STS_CTL_INV_ENABLE;
377
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
379 switch (tsif_device->mode) {
380 case 1: /* mode 1 */
381 ctl |= (0 << 5);
382 break;
383 case 2: /* mode 2 */
384 ctl |= (1 << 5);
385 break;
386 case 3: /* manual - control from debugfs */
387 return 0;
388 break;
389 default:
390 return -EINVAL;
391 }
392 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
393 iowrite32(tsif_device->time_limit,
394 tsif_device->base + TSIF_TIME_LIMIT_OFF);
395 wmb();
396 iowrite32(ctl | TSIF_STS_CTL_START,
397 tsif_device->base + TSIF_STS_CTL_OFF);
398 wmb();
399 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
400 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
401}
402
/* Request the TSIF core to stop receiving. */
static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
{
	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb();	/* make sure the STOP write is posted before returning */
}
408
409/* ===DMA begin=== */
410/**
411 * TSIF DMA theory of operation
412 *
413 * Circular memory buffer \a tsif_mem_buffer allocated;
414 * 4 pointers points to and moved forward on:
415 * - \a ri index of first ready to read packet.
416 * Updated by client's call to tsif_reclaim_packets()
417 * - \a wi points to the next packet to be written by DM.
418 * Data below is valid and will not be overriden by DMA.
419 * Moved on DM callback
420 * - \a dmwi points to the next packet not scheduled yet for DM
421 * moved when packet scheduled for DM
422 *
423 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
424 * at time immediately after scheduling.
425 *
426 * Initially, 2 packets get scheduled for the DM.
427 *
428 * Upon packet receive, DM writes packet to the pre-programmed
429 * location and invoke its callback.
430 *
431 * DM callback moves sets wi pointer to \a xfer->wi;
432 * then it schedules next packet for DM and moves \a dmwi pointer.
433 *
434 * Buffer overflow handling
435 *
436 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
437 * DMA re-scheduled to the same index.
438 * Callback check and not move \a wi to become equal to \a ri
439 *
440 * On \a read request, data between \a ri and \a wi pointers may be read;
441 * \ri pointer moved accordingly.
442 *
443 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
444 * \a wi is between [\a ri, \a dmwi]
445 *
446 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
447 *
448 * Number of scheduled packets for DM: (dmwi-wi)
449 */
450
/**
 * tsif_dma_schedule - schedule DMA transfers
 *
 * @tsif_device: device
 *
 * Schedules a chunk-sized box transfer on every free xfer slot (there
 * are two).  Executed from process context on init, or from tasklet
 * when re-scheduling upon DMA completion; this prevents concurrent
 * execution from several CPUs.
 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* aim this xfer's destination at the current DM write slot */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* the completion callback will set device wi to this value */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
503
/**
 * tsif_dmov_complete_func - DataMover completion callback
 *
 * @cmd: original DM command
 * @result: DM result
 * @err: optional error buffer
 *
 * Advances the device write index, detects soft drops, and handles
 * DM error/flush results.  Executed in IRQ context (Data Mover's IRQ)
 * with DataMover's spinlock @msm_dmov_lock held - hence no direct
 * rescheduling here (see the tasklet note at the bottom).
 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * This branch is taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happen for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 * Clocks are turned off from outside the
				 * interrupt context.
				 */
				tasklet_schedule(&tsif_device->clocks_off);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
641
642/**
643 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
644 *
645 * @data: tsif_device
646 *
647 * Reschedule DMA requests
648 *
649 * Executed in tasklet
650 */
651static void tsif_dma_refill(unsigned long data)
652{
653 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
654 if (tsif_device->state == tsif_state_running)
655 tsif_dma_schedule(tsif_device);
656}
657
/**
 * tsif_dma_flush - flush DMA channel
 *
 * @tsif_device: device
 *
 * Busy-waits until both xfer slots are idle; the .busy flags are
 * cleared by tsif_dmov_complete_func() when the flush completes.
 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			msm_dmov_flush(tsif_device->dma, 1);
			usleep(10000);	/* 10 ms between flush attempts */
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
679
680static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
681{
682 int i;
683 tsif_device->state = tsif_state_flushing;
684 tasklet_kill(&tsif_device->dma_refill);
685 tsif_dma_flush(tsif_device);
686 for (i = 0; i < 2; i++) {
687 if (tsif_device->dmov_cmd[i]) {
688 dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
689 tsif_device->dmov_cmd[i],
690 tsif_device->dmov_cmd_dma[i]);
691 tsif_device->dmov_cmd[i] = NULL;
692 }
693 }
694 if (tsif_device->data_buffer) {
695 tsif_device->blob_wrapper_databuf.data = NULL;
696 tsif_device->blob_wrapper_databuf.size = 0;
697 dma_free_coherent(NULL, TSIF_BUF_SIZE,
698 tsif_device->data_buffer,
699 tsif_device->data_buffer_dma);
700 tsif_device->data_buffer = NULL;
701 }
702}
703
/*
 * Allocate the cyclic data buffer and the two DM command structures,
 * and pre-program each DM box command.  Returns 0 or -ENOMEM (all
 * partial allocations are released via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	/* expose the buffer through the debugfs blob wrapper */
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	/* reset the circular-buffer indices */
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		/* source: the TSIF data port register (fixed) */
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* source never advances; destination steps one packet/row */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
			      offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	msm_dmov_flush(tsif_device->dma, 1);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
758
759/* ===DMA end=== */
760
761/* ===IRQ begin=== */
762
763static irqreturn_t tsif_irq(int irq, void *dev_id)
764{
765 struct msm_tsif_device *tsif_device = dev_id;
766 u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
767 if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
768 TSIF_STS_CTL_OVERFLOW |
769 TSIF_STS_CTL_LOST_SYNC |
770 TSIF_STS_CTL_TIMEOUT))) {
771 dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
772 return IRQ_NONE;
773 }
774 if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
775 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
776 tsif_device->stat_rx++;
777 }
778 if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
779 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
780 tsif_device->stat_overflow++;
781 }
782 if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
783 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
784 tsif_device->stat_lost_sync++;
785 }
786 if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
787 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
788 tsif_device->stat_timeout++;
789 }
790 iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
791 wmb();
792 return IRQ_HANDLED;
793}
794
795/* ===IRQ end=== */
796
797/* ===Device attributes begin=== */
798
/* sysfs 'stats' show: dump configuration, counters, and a few clock
 * control registers for debugging. */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"Clock inv = %d\n"
			"Data inv = %d\n"
			"Sync inv = %d\n"
			"Enable inv = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->clock_inverse,
			tsif_device->data_inverse,
			tsif_device->sync_inverse,
			tsif_device->enable_inverse,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
872/**
873 * set_stats - reset statistics on write
874 *
875 * @dev:
876 * @attr:
877 * @buf:
878 * @count:
879 */
880static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
881 const char *buf, size_t count)
882{
883 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
884 tsif_device->stat_rx = 0;
885 tsif_device->stat_overflow = 0;
886 tsif_device->stat_lost_sync = 0;
887 tsif_device->stat_timeout = 0;
888 tsif_device->stat_dmov_err = 0;
889 tsif_device->stat_soft_drop = 0;
890 tsif_device->stat_ifi = 0;
891 return count;
892}
893static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
894
895static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
896 char *buf)
897{
898 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
899 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
900}
901
902static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
903 const char *buf, size_t count)
904{
905 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
906 int value;
907 int rc;
908 if (1 != sscanf(buf, "%d", &value)) {
909 dev_err(&tsif_device->pdev->dev,
910 "Failed to parse integer: <%s>\n", buf);
911 return -EINVAL;
912 }
913 rc = tsif_set_mode(tsif_device, value);
914 if (!rc)
915 rc = count;
916 return rc;
917}
918static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
919
920static ssize_t show_time_limit(struct device *dev,
921 struct device_attribute *attr,
922 char *buf)
923{
924 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
925 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
926}
927
928static ssize_t set_time_limit(struct device *dev,
929 struct device_attribute *attr,
930 const char *buf, size_t count)
931{
932 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
933 int value;
934 int rc;
935 if (1 != sscanf(buf, "%d", &value)) {
936 dev_err(&tsif_device->pdev->dev,
937 "Failed to parse integer: <%s>\n", buf);
938 return -EINVAL;
939 }
940 rc = tsif_set_time_limit(tsif_device, value);
941 if (!rc)
942 rc = count;
943 return rc;
944}
945static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
946 show_time_limit, set_time_limit);
947
948static ssize_t show_buf_config(struct device *dev,
949 struct device_attribute *attr,
950 char *buf)
951{
952 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
953 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
954 tsif_device->pkts_per_chunk,
955 tsif_device->chunks_per_buf);
956}
957
958static ssize_t set_buf_config(struct device *dev,
959 struct device_attribute *attr,
960 const char *buf, size_t count)
961{
962 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
963 u32 p, c;
964 int rc;
965 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
966 dev_err(&tsif_device->pdev->dev,
967 "Failed to parse integer: <%s>\n", buf);
968 return -EINVAL;
969 }
970 rc = tsif_set_buf_config(tsif_device, p, c);
971 if (!rc)
972 rc = count;
973 return rc;
974}
975static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
976 show_buf_config, set_buf_config);
977
Hamad Kadmany509b7662012-10-18 14:00:39 +0200978static ssize_t show_clk_inverse(struct device *dev,
979 struct device_attribute *attr, char *buf)
980{
981 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
982 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->clock_inverse);
983}
984
985static ssize_t set_clk_inverse(struct device *dev,
986 struct device_attribute *attr, const char *buf, size_t count)
987{
988 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
989 int value;
990 int rc;
991 if (1 != sscanf(buf, "%d", &value)) {
992 dev_err(&tsif_device->pdev->dev,
993 "Failed to parse integer: <%s>\n", buf);
994 return -EINVAL;
995 }
996 rc = tsif_set_clk_inverse(tsif_device, value);
997 if (!rc)
998 rc = count;
999 return rc;
1000}
1001static DEVICE_ATTR(clk_inverse, S_IRUGO | S_IWUSR,
1002 show_clk_inverse, set_clk_inverse);
1003
1004static ssize_t show_data_inverse(struct device *dev,
1005 struct device_attribute *attr, char *buf)
1006{
1007 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1008 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->data_inverse);
1009}
1010
1011static ssize_t set_data_inverse(struct device *dev,
1012 struct device_attribute *attr, const char *buf, size_t count)
1013{
1014 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1015 int value;
1016 int rc;
1017 if (1 != sscanf(buf, "%d", &value)) {
1018 dev_err(&tsif_device->pdev->dev,
1019 "Failed to parse integer: <%s>\n", buf);
1020 return -EINVAL;
1021 }
1022 rc = tsif_set_data_inverse(tsif_device, value);
1023 if (!rc)
1024 rc = count;
1025 return rc;
1026}
1027static DEVICE_ATTR(data_inverse, S_IRUGO | S_IWUSR,
1028 show_data_inverse, set_data_inverse);
1029
1030static ssize_t show_sync_inverse(struct device *dev,
1031 struct device_attribute *attr, char *buf)
1032{
1033 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1034 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->sync_inverse);
1035}
1036
1037static ssize_t set_sync_inverse(struct device *dev,
1038 struct device_attribute *attr, const char *buf, size_t count)
1039{
1040 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1041 int value;
1042 int rc;
1043 if (1 != sscanf(buf, "%d", &value)) {
1044 dev_err(&tsif_device->pdev->dev,
1045 "Failed to parse integer: <%s>\n", buf);
1046 return -EINVAL;
1047 }
1048 rc = tsif_set_sync_inverse(tsif_device, value);
1049 if (!rc)
1050 rc = count;
1051 return rc;
1052}
1053static DEVICE_ATTR(sync_inverse, S_IRUGO | S_IWUSR,
1054 show_sync_inverse, set_sync_inverse);
1055
1056static ssize_t show_enable_inverse(struct device *dev,
1057 struct device_attribute *attr, char *buf)
1058{
1059 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1060 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->enable_inverse);
1061}
1062
1063static ssize_t set_enable_inverse(struct device *dev,
1064 struct device_attribute *attr, const char *buf, size_t count)
1065{
1066 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1067 int value;
1068 int rc;
1069 if (1 != sscanf(buf, "%d", &value)) {
1070 dev_err(&tsif_device->pdev->dev,
1071 "Failed to parse integer: <%s>\n", buf);
1072 return -EINVAL;
1073 }
1074 rc = tsif_set_enable_inverse(tsif_device, value);
1075 if (!rc)
1076 rc = count;
1077 return rc;
1078}
1079static DEVICE_ATTR(enable_inverse, S_IRUGO | S_IWUSR,
1080 show_enable_inverse, set_enable_inverse);
1081
1082
/* sysfs attributes exported under the platform device directory */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	&dev_attr_clk_inverse.attr,
	&dev_attr_data_inverse.attr,
	&dev_attr_sync_inverse.attr,
	&dev_attr_enable_inverse.attr,
	NULL,	/* sentinel required by the attribute-group API */
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
1097/* ===Device attributes end=== */
1098
1099/* ===debugfs begin=== */
1100
/* debugfs x32 attribute: write a 32-bit TSIF register */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb();	/* ensure the register write is posted before returning */
	return 0;
}

/* debugfs x32 attribute: read a 32-bit TSIF register */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");
1116
/* Expose one 32-bit MMIO register as a hex-formatted debugfs file. */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}
1122
1123static int action_open(struct msm_tsif_device *tsif_device)
1124{
1125 int rc = -EINVAL;
1126 int result;
1127
1128 struct msm_tsif_platform_data *pdata =
1129 tsif_device->pdev->dev.platform_data;
Hamad Kadmanyb18fac52012-09-01 12:57:24 +03001130
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001131 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
1132 if (tsif_device->state != tsif_state_stopped)
1133 return -EAGAIN;
1134 rc = tsif_dma_init(tsif_device);
1135 if (rc) {
1136 dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
1137 return rc;
1138 }
1139 tsif_device->state = tsif_state_running;
Joel Nider951b2832012-05-07 21:13:38 +03001140
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001141 /*
1142 * DMA should be scheduled prior to TSIF hardware initialization,
1143 * otherwise "bus error" will be reported by Data Mover
1144 */
1145 enable_irq(tsif_device->irq);
1146 tsif_clock(tsif_device, 1);
1147 tsif_dma_schedule(tsif_device);
1148 /*
1149 * init the device if required
1150 */
1151 if (pdata->init)
1152 pdata->init(pdata);
1153 rc = tsif_start_hw(tsif_device);
1154 if (rc) {
1155 dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
1156 tsif_dma_exit(tsif_device);
1157 tsif_clock(tsif_device, 0);
Hamad Kadmanyb18fac52012-09-01 12:57:24 +03001158 disable_irq(tsif_device->irq);
1159 return rc;
1160 }
1161
1162 /* make sure the GPIO's are set up */
1163 rc = tsif_start_gpios(tsif_device);
1164 if (rc) {
1165 dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
1166 tsif_stop_hw(tsif_device);
1167 tsif_dma_exit(tsif_device);
1168 tsif_clock(tsif_device, 0);
1169 disable_irq(tsif_device->irq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001170 return rc;
1171 }
1172
1173 result = pm_runtime_get(&tsif_device->pdev->dev);
1174 if (result < 0) {
1175 dev_err(&tsif_device->pdev->dev,
1176 "Runtime PM: Unable to wake up the device, rc = %d\n",
1177 result);
Hamad Kadmanyb18fac52012-09-01 12:57:24 +03001178 tsif_stop_gpios(tsif_device);
1179 tsif_stop_hw(tsif_device);
1180 tsif_dma_exit(tsif_device);
1181 tsif_clock(tsif_device, 0);
1182 disable_irq(tsif_device->irq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001183 return result;
1184 }
1185
1186 wake_lock(&tsif_device->wake_lock);
Hamad Kadmanyb18fac52012-09-01 12:57:24 +03001187 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188}
1189
/*
 * Tear the TSIF interface down. The shutdown order is deliberate and
 * timing-sensitive: stop the input GPIOs first, wait for the Data Mover
 * to drain, and only then stop the core, DMA, clocks and IRQ.
 * Always returns 0.
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);

	/* turn off the GPIO's to prevent new data from entering */
	tsif_stop_gpios(tsif_device);

	/* we unfortunately must sleep here to give the ADM time to
	 * complete any outstanding reads after the GPIO's are turned
	 * off. There is no indication from the ADM hardware that
	 * there are any outstanding reads on the bus, and if we
	 * stop the TSIF too quickly, it can cause a bus error.
	 */
	msleep(250);

	/* now we can stop the core */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	/* release the references taken in action_open() */
	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1216
1217
/* command table for the debugfs "action" file: written name -> handler */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1225
1226static ssize_t tsif_debugfs_action_write(struct file *filp,
1227 const char __user *userbuf,
1228 size_t count, loff_t *f_pos)
1229{
1230 int i;
1231 struct msm_tsif_device *tsif_device = filp->private_data;
1232 char s[40];
1233 int len = min(sizeof(s) - 1, count);
1234 if (copy_from_user(s, userbuf, len))
1235 return -EFAULT;
1236 s[len] = '\0';
1237 dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
1238 for (i = 0; i < ARRAY_SIZE(actions); i++) {
1239 if (!strncmp(s, actions[i].name,
1240 min(count, strlen(actions[i].name)))) {
1241 int rc = actions[i].func(tsif_device);
1242 if (!rc)
1243 rc = count;
1244 return rc;
1245 }
1246 }
1247 return -EINVAL;
1248}
1249
/* debugfs open: stash the device pointer for the read/write handlers */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

/* "action" file: write-only command interface (see actions[] table) */
static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1260
1261static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1262 size_t count, loff_t *f_pos)
1263{
1264 static char bufa[200];
1265 static char *buf = bufa;
1266 int sz = sizeof(bufa);
1267 struct msm_tsif_device *tsif_device = filp->private_data;
1268 int len = 0;
1269 if (tsif_device) {
1270 int i;
1271 len += snprintf(buf + len, sz - len,
1272 "ri %3d | wi %3d | dmwi %3d |",
1273 tsif_device->ri, tsif_device->wi,
1274 tsif_device->dmwi);
1275 for (i = 0; i < 2; i++) {
1276 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1277 if (xfer->busy) {
1278 u32 dst =
1279 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1280 u32 base = tsif_device->data_buffer_dma;
1281 int w = (dst - base) / TSIF_PKT_SIZE;
1282 len += snprintf(buf + len, sz - len,
1283 " [%3d]{%3d}",
1284 w, xfer->wi);
1285 } else {
1286 len += snprintf(buf + len, sz - len,
1287 " ---idle---");
1288 }
1289 }
1290 len += snprintf(buf + len, sz - len, "\n");
1291 } else {
1292 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1293 }
1294 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1295}
1296
1297static const struct file_operations fops_debugfs_dma = {
1298 .open = tsif_debugfs_generic_open,
1299 .read = tsif_debugfs_dma_read,
1300};
1301
1302static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1303 size_t count, loff_t *f_pos)
1304{
1305 static char bufa[300];
1306 static char *buf = bufa;
1307 int sz = sizeof(bufa);
1308 struct msm_tsif_device *tsif_device = filp->private_data;
1309 int len = 0;
1310 if (tsif_device) {
1311 struct msm_tsif_platform_data *pdata =
1312 tsif_device->pdev->dev.platform_data;
1313 int i;
1314 for (i = 0; i < pdata->num_gpios; i++) {
1315 if (pdata->gpios[i].gpio_cfg) {
1316 int x = !!gpio_get_value(GPIO_PIN(
1317 pdata->gpios[i].gpio_cfg));
1318 len += snprintf(buf + len, sz - len,
1319 "%15s: %d\n",
1320 pdata->gpios[i].label, x);
1321 }
1322 }
1323 } else {
1324 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1325 }
1326 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1327}
1328
1329static const struct file_operations fops_debugfs_gpios = {
1330 .open = tsif_debugfs_generic_open,
1331 .read = tsif_debugfs_gpios_read,
1332};
1333
1334
/*
 * Create the per-device debugfs tree: one x32 file per entry of the
 * debugfs_tsif_regs[] table, plus "gpios", "action", "dma" and the raw
 * "data_buf" blob. Creation failures of individual files are not
 * checked — debugfs is a best-effort diagnostic facility.
 */
static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
{
	tsif_device->dent_tsif = debugfs_create_dir(
		dev_name(&tsif_device->pdev->dev), NULL);
	if (tsif_device->dent_tsif) {
		int i;
		void __iomem *base = tsif_device->base;
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
			tsif_device->debugfs_tsif_regs[i] =
				debugfs_create_iomem_x32(
					debugfs_tsif_regs[i].name,
					debugfs_tsif_regs[i].mode,
					tsif_device->dent_tsif,
					base + debugfs_tsif_regs[i].offset);
		}
		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
		tsif_device->debugfs_action = debugfs_create_file("action",
			S_IWUSR,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
		tsif_device->debugfs_dma = debugfs_create_file("dma",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
			S_IRUGO,
			tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
	}
}
1364
1365static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1366{
1367 if (tsif_device->dent_tsif) {
1368 int i;
1369 debugfs_remove_recursive(tsif_device->dent_tsif);
1370 tsif_device->dent_tsif = NULL;
1371 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1372 tsif_device->debugfs_tsif_regs[i] = NULL;
1373 tsif_device->debugfs_gpio = NULL;
1374 tsif_device->debugfs_action = NULL;
1375 tsif_device->debugfs_dma = NULL;
1376 tsif_device->debugfs_databuf = NULL;
1377 }
1378}
1379/* ===debugfs end=== */
1380
1381/* ===module begin=== */
1382static LIST_HEAD(tsif_devices);
1383
1384static struct msm_tsif_device *tsif_find_by_id(int id)
1385{
1386 struct msm_tsif_device *tsif_device;
1387 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1388 if (tsif_device->pdev->id == id)
1389 return tsif_device;
1390 }
1391 return NULL;
1392}
1393
/*
 * Probe one TSIF platform device: validate platform data and device id,
 * allocate and default-initialize the per-device context, map the MEM
 * resource, record the DMA channel/CRCI, set up runtime PM, debugfs,
 * the IRQ and the sysfs attribute group, then add the device to the
 * global tsif_devices list. Errors unwind through the labeled path in
 * reverse order of acquisition.
 */
static int __devinit msm_tsif_probe(struct platform_device *pdev)
{
	int rc = -ENODEV;
	struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
	struct msm_tsif_device *tsif_device;
	struct resource *res;
	/* check device validity */
	/* must have platform data */
	if (!plat) {
		dev_err(&pdev->dev, "Platform data not available\n");
		rc = -EINVAL;
		goto out;
	}

	if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
		dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
		rc = -EINVAL;
		goto out;
	}
	/* OK, we will use this device */
	tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
	if (!tsif_device) {
		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
		rc = -ENOMEM;
		goto out;
	}
	/* cross links */
	tsif_device->pdev = pdev;
	platform_set_drvdata(pdev, tsif_device);
	tsif_device->mode = 1;
	/* all signal polarities default to non-inverted */
	tsif_device->clock_inverse = 0;
	tsif_device->data_inverse = 0;
	tsif_device->sync_inverse = 0;
	tsif_device->enable_inverse = 0;
	tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
	tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
	tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
		     (unsigned long)tsif_device);
	tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
		     (unsigned long)tsif_device);
	if (tsif_get_clocks(tsif_device))
		goto err_clocks;
/* map I/O memory */
	tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tsif_device->memres) {
		dev_err(&pdev->dev, "Missing MEM resource\n");
		rc = -ENXIO;
		goto err_rgn;
	}
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(&pdev->dev, "Missing DMA resource\n");
		rc = -ENXIO;
		goto err_rgn;
	}
	/* the DMA resource packs the channel in start and the CRCI in end */
	tsif_device->dma = res->start;
	tsif_device->crci = res->end;
	tsif_device->base = ioremap(tsif_device->memres->start,
				    resource_size(tsif_device->memres));
	if (!tsif_device->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_ioremap;
	}
	dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
		 tsif_device->memres->start, tsif_device->base);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	tsif_debugfs_init(tsif_device);
	rc = platform_get_irq(pdev, 0);
	/*
	 * NOTE(review): if platform_get_irq() returned 0, the request_irq()
	 * branch would be skipped yet "if (rc)" below would not trigger,
	 * so probing would continue with an unconfigured IRQ — presumably
	 * 0 never occurs on this platform; verify.
	 */
	if (rc > 0) {
		tsif_device->irq = rc;
		rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
				 dev_name(&pdev->dev), tsif_device);
		/* keep the IRQ masked until action_open() enables it */
		disable_irq(tsif_device->irq);
	}
	if (rc) {
		dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
			tsif_device->irq, rc);
		goto err_irq;
	}
	rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
	if (rc) {
		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
		goto err_attrs;
	}
	wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
		       dev_name(&pdev->dev));
	dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
		 tsif_device->irq, tsif_device->memres->start,
		 tsif_device->dma, tsif_device->crci);
	list_add(&tsif_device->devlist, &tsif_devices);
	return 0;
/* error path */
	/*
	 * NOTE(review): the next sysfs_remove_group() call is unreachable —
	 * it follows "return 0" and no goto targets it; it looks like dead
	 * code left over from an earlier error-path layout.
	 */
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
err_attrs:
	free_irq(tsif_device->irq, tsif_device);
err_irq:
	tsif_debugfs_exit(tsif_device);
	iounmap(tsif_device->base);
err_ioremap:
err_rgn:
	tsif_put_clocks(tsif_device);
err_clocks:
	kfree(tsif_device);
out:
	return rc;
}
1503
/*
 * Remove one TSIF platform device: take it off the global list, then
 * release everything acquired in msm_tsif_probe() — wakelock, sysfs
 * group, IRQ, debugfs, DMA, GPIOs, MMIO mapping, clocks — and finally
 * drop/disable runtime PM before freeing the context.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1523
/* Runtime-PM callback: no device work required, trace only. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

/* Runtime-PM callback: no device work required, trace only. */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};
1540
1541
1542static struct platform_driver msm_tsif_driver = {
1543 .probe = msm_tsif_probe,
1544 .remove = __exit_p(msm_tsif_remove),
1545 .driver = {
1546 .name = "msm_tsif",
1547 .pm = &tsif_dev_pm_ops,
1548 },
1549};
1550
1551static int __init mod_init(void)
1552{
1553 int rc = platform_driver_register(&msm_tsif_driver);
1554 if (rc)
1555 pr_err("TSIF: platform_driver_register failed: %d\n", rc);
1556 return rc;
1557}
1558
1559static void __exit mod_exit(void)
1560{
1561 platform_driver_unregister(&msm_tsif_driver);
1562}
1563/* ===module end=== */
1564
1565/* public API */
1566
Joel Nider5578bdb2011-08-12 09:37:11 +03001567int tsif_get_active(void)
1568{
1569 struct msm_tsif_device *tsif_device;
1570 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1571 return tsif_device->pdev->id;
1572 }
1573 return -ENODEV;
1574}
1575EXPORT_SYMBOL(tsif_get_active);
1576
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001577void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
1578{
1579 struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
Joel Nider5578bdb2011-08-12 09:37:11 +03001580 if (!tsif_device)
1581 return ERR_PTR(-ENODEV);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582 if (tsif_device->client_notify || tsif_device->client_data)
1583 return ERR_PTR(-EBUSY);
1584 tsif_device->client_notify = notify;
1585 tsif_device->client_data = data;
1586 /* prevent from unloading */
1587 get_device(&tsif_device->pdev->dev);
1588 return tsif_device;
1589}
1590EXPORT_SYMBOL(tsif_attach);
1591
1592void tsif_detach(void *cookie)
1593{
1594 struct msm_tsif_device *tsif_device = cookie;
1595 tsif_device->client_notify = NULL;
1596 tsif_device->client_data = NULL;
1597 put_device(&tsif_device->pdev->dev);
1598}
1599EXPORT_SYMBOL(tsif_detach);
1600
1601void tsif_get_info(void *cookie, void **pdata, int *psize)
1602{
1603 struct msm_tsif_device *tsif_device = cookie;
1604 if (pdata)
1605 *pdata = tsif_device->data_buffer;
1606 if (psize)
1607 *psize = TSIF_PKTS_IN_BUF;
1608}
1609EXPORT_SYMBOL(tsif_get_info);
1610
1611int tsif_set_mode(void *cookie, int mode)
1612{
1613 struct msm_tsif_device *tsif_device = cookie;
1614 if (tsif_device->state != tsif_state_stopped) {
1615 dev_err(&tsif_device->pdev->dev,
1616 "Can't change mode while device is active\n");
1617 return -EBUSY;
1618 }
1619 switch (mode) {
1620 case 1:
1621 case 2:
1622 case 3:
1623 tsif_device->mode = mode;
1624 break;
1625 default:
1626 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1627 return -EINVAL;
1628 }
1629 return 0;
1630}
1631EXPORT_SYMBOL(tsif_set_mode);
1632
1633int tsif_set_time_limit(void *cookie, u32 value)
1634{
1635 struct msm_tsif_device *tsif_device = cookie;
1636 if (tsif_device->state != tsif_state_stopped) {
1637 dev_err(&tsif_device->pdev->dev,
1638 "Can't change time limit while device is active\n");
1639 return -EBUSY;
1640 }
1641 if (value != (value & 0xFFFFFF)) {
1642 dev_err(&tsif_device->pdev->dev,
1643 "Invalid time limit (should be 24 bit): %#x\n", value);
1644 return -EINVAL;
1645 }
1646 tsif_device->time_limit = value;
1647 return 0;
1648}
1649EXPORT_SYMBOL(tsif_set_time_limit);
1650
1651int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1652{
1653 struct msm_tsif_device *tsif_device = cookie;
1654 if (tsif_device->data_buffer) {
1655 dev_err(&tsif_device->pdev->dev,
1656 "Data buffer already allocated: %p\n",
1657 tsif_device->data_buffer);
1658 return -EBUSY;
1659 }
1660 /* check for crazy user */
1661 if (pkts_in_chunk * chunks_in_buf > 10240) {
1662 dev_err(&tsif_device->pdev->dev,
1663 "Buffer requested is too large: %d * %d\n",
1664 pkts_in_chunk,
1665 chunks_in_buf);
1666 return -EINVAL;
1667 }
1668 /* parameters are OK, execute */
1669 tsif_device->pkts_per_chunk = pkts_in_chunk;
1670 tsif_device->chunks_per_buf = chunks_in_buf;
1671 return 0;
1672}
1673EXPORT_SYMBOL(tsif_set_buf_config);
1674
Hamad Kadmany509b7662012-10-18 14:00:39 +02001675int tsif_set_clk_inverse(void *cookie, int value)
1676{
1677 struct msm_tsif_device *tsif_device = cookie;
1678 if (tsif_device->state != tsif_state_stopped) {
1679 dev_err(&tsif_device->pdev->dev,
1680 "Can't change clock inverse while device is active\n");
1681 return -EBUSY;
1682 }
1683 if ((value != 0) && (value != 1)) {
1684 dev_err(&tsif_device->pdev->dev,
1685 "Invalid parameter, either 0 or 1: %#x\n", value);
1686 return -EINVAL;
1687 }
1688 tsif_device->clock_inverse = value;
1689 return 0;
1690}
1691EXPORT_SYMBOL(tsif_set_clk_inverse);
1692
1693int tsif_set_data_inverse(void *cookie, int value)
1694{
1695 struct msm_tsif_device *tsif_device = cookie;
1696 if (tsif_device->state != tsif_state_stopped) {
1697 dev_err(&tsif_device->pdev->dev,
1698 "Can't change data inverse while device is active\n");
1699 return -EBUSY;
1700 }
1701 if ((value != 0) && (value != 1)) {
1702 dev_err(&tsif_device->pdev->dev,
1703 "Invalid parameter, either 0 or 1: %#x\n", value);
1704 return -EINVAL;
1705 }
1706 tsif_device->data_inverse = value;
1707 return 0;
1708}
1709EXPORT_SYMBOL(tsif_set_data_inverse);
1710
1711int tsif_set_sync_inverse(void *cookie, int value)
1712{
1713 struct msm_tsif_device *tsif_device = cookie;
1714 if (tsif_device->state != tsif_state_stopped) {
1715 dev_err(&tsif_device->pdev->dev,
1716 "Can't change sync inverse while device is active\n");
1717 return -EBUSY;
1718 }
1719 if ((value != 0) && (value != 1)) {
1720 dev_err(&tsif_device->pdev->dev,
1721 "Invalid parameter, either 0 or 1: %#x\n", value);
1722 return -EINVAL;
1723 }
1724 tsif_device->sync_inverse = value;
1725 return 0;
1726}
1727EXPORT_SYMBOL(tsif_set_sync_inverse);
1728
1729int tsif_set_enable_inverse(void *cookie, int value)
1730{
1731 struct msm_tsif_device *tsif_device = cookie;
1732 if (tsif_device->state != tsif_state_stopped) {
1733 dev_err(&tsif_device->pdev->dev,
1734 "Can't change enable inverse while device is active\n");
1735 return -EBUSY;
1736 }
1737 if ((value != 0) && (value != 1)) {
1738 dev_err(&tsif_device->pdev->dev,
1739 "Invalid parameter, either 0 or 1: %#x\n", value);
1740 return -EINVAL;
1741 }
1742 tsif_device->enable_inverse = value;
1743 return 0;
1744}
1745EXPORT_SYMBOL(tsif_set_enable_inverse);
1746
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001747void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
1748{
1749 struct msm_tsif_device *tsif_device = cookie;
1750 if (ri)
1751 *ri = tsif_device->ri;
1752 if (wi)
1753 *wi = tsif_device->wi;
1754 if (state)
1755 *state = tsif_device->state;
1756}
1757EXPORT_SYMBOL(tsif_get_state);
1758
/* Public API: start streaming on the attached device (see action_open()). */
int tsif_start(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	return action_open(tsif_device);
}
EXPORT_SYMBOL(tsif_start);

/* Public API: stop streaming; action_close()'s return value is discarded. */
void tsif_stop(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	action_close(tsif_device);
}
EXPORT_SYMBOL(tsif_stop);
1772
Hamad Kadmany68bd27a2013-01-31 14:53:32 +02001773int tsif_get_ref_clk_counter(void *cookie, u32 *tcr_counter)
1774{
1775 struct msm_tsif_device *tsif_device = cookie;
1776
1777 if (!tsif_device || !tcr_counter)
1778 return -EINVAL;
1779
1780 if (tsif_device->state == tsif_state_running)
1781 *tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
1782 else
1783 *tcr_counter = 0;
1784
1785 return 0;
1786}
1787EXPORT_SYMBOL(tsif_get_ref_clk_counter);
1788
/*
 * Public API: the client advances the read index after consuming packets,
 * releasing that part of the data buffer back to the driver.
 */
void tsif_reclaim_packets(void *cookie, int read_index)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->ri = read_index;
}
EXPORT_SYMBOL(tsif_reclaim_packets);
1795
/* module entry points and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1802