blob: f80fbcc6fea625b199bbc0d29d9f61c3893978a1 [file] [log] [blame]
/*
 * TSIF Driver
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
16#include <linux/module.h> /* Needed by all modules */
17#include <linux/kernel.h> /* Needed for KERN_INFO */
18#include <linux/init.h> /* Needed for the macros */
19#include <linux/err.h> /* IS_ERR etc. */
20#include <linux/platform_device.h>
21
22#include <linux/ioport.h> /* XXX_mem_region */
23#include <linux/debugfs.h>
24#include <linux/dma-mapping.h> /* dma_XXX */
25#include <linux/delay.h> /* msleep */
26
27#include <linux/io.h> /* ioXXX */
28#include <linux/uaccess.h> /* copy_from_user */
29#include <linux/clk.h>
30#include <linux/wakelock.h>
31#include <linux/tsif_api.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h> /* kfree, kzalloc */
Steve Mucklef132c6c2012-06-06 18:30:57 -070034#include <linux/gpio.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035#include <mach/dma.h>
36#include <mach/msm_tsif.h>
37
/*
 * TSIF register offsets (relative to the mapped register base)
 */
#define TSIF_STS_CTL_OFF      (0x0)   /* status/control */
#define TSIF_TIME_LIMIT_OFF   (0x4)
#define TSIF_CLK_REF_OFF      (0x8)
#define TSIF_LPBK_FLAGS_OFF   (0xc)
#define TSIF_LPBK_DATA_OFF    (0x10)
#define TSIF_TEST_CTL_OFF     (0x14)
#define TSIF_TEST_MODE_OFF    (0x18)
#define TSIF_TEST_RESET_OFF   (0x1c)
#define TSIF_TEST_EXPORT_OFF  (0x20)
#define TSIF_TEST_CURRENT_OFF (0x24)

#define TSIF_DATA_PORT_OFF    (0x100) /* FIFO read port; DMA source */

/* bits for TSIF_STS_CTL register */
#define TSIF_STS_CTL_EN_IRQ       (1 << 28)
#define TSIF_STS_CTL_PACK_AVAIL   (1 << 27)
#define TSIF_STS_CTL_1ST_PACKET   (1 << 26)
#define TSIF_STS_CTL_OVERFLOW     (1 << 25)
#define TSIF_STS_CTL_LOST_SYNC    (1 << 24)
#define TSIF_STS_CTL_TIMEOUT      (1 << 23)
#define TSIF_STS_CTL_INV_SYNC     (1 << 21)
#define TSIF_STS_CTL_INV_NULL     (1 << 20)
#define TSIF_STS_CTL_INV_ERROR    (1 << 19)
#define TSIF_STS_CTL_INV_ENABLE   (1 << 18)
#define TSIF_STS_CTL_INV_DATA     (1 << 17)
#define TSIF_STS_CTL_INV_CLOCK    (1 << 16)
#define TSIF_STS_CTL_SPARE        (1 << 15)
#define TSIF_STS_CTL_EN_NULL      (1 << 11)
#define TSIF_STS_CTL_EN_ERROR     (1 << 10)
#define TSIF_STS_CTL_LAST_BIT     (1 << 9)
#define TSIF_STS_CTL_EN_TIME_LIM  (1 << 8)
#define TSIF_STS_CTL_EN_TCR       (1 << 7)
#define TSIF_STS_CTL_TEST_MODE    (3 << 5)
#define TSIF_STS_CTL_EN_DM        (1 << 4)
#define TSIF_STS_CTL_STOP         (1 << 3)
#define TSIF_STS_CTL_START        (1 << 0)

/*
 * Data buffering parameters
 *
 * Data stored in cyclic buffer;
 *
 * Data organized in chunks of packets.
 * One chunk processed at a time by the data mover
 *
 */
#define TSIF_PKTS_IN_CHUNK_DEFAULT  (16)  /**< packets in one DM chunk */
#define TSIF_CHUNKS_IN_BUF_DEFAULT   (8)
/* runtime values; these macros expect a local @tsif_device in scope */
#define TSIF_PKTS_IN_CHUNK        (tsif_device->pkts_per_chunk)
#define TSIF_CHUNKS_IN_BUF        (tsif_device->chunks_per_buf)
#define TSIF_PKTS_IN_BUF          (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
#define TSIF_BUF_SIZE             (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
#define TSIF_MAX_ID 1

/* clock-controller registers dumped by show_stats() for debugging */
#define ROW_RESET                 (MSM_CLK_CTL_BASE + 0x214)
#define GLBL_CLK_ENA              (MSM_CLK_CTL_BASE + 0x000)
#define CLK_HALT_STATEB           (MSM_CLK_CTL_BASE + 0x104)
#define TSIF_NS_REG               (MSM_CLK_CTL_BASE + 0x0b4)
#define TV_NS_REG                 (MSM_CLK_CTL_BASE + 0x0bc)
/* used to create debugfs entries: one file per TSIF register */
static const struct {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* file permissions */
	int offset;		/* register offset the file exposes */
} debugfs_tsif_regs[] = {
	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},
	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
};
119
/* structures for Data Mover */

/* One pre-built DM command: a box transfer plus its command pointer. */
struct tsif_dmov_cmd {
	dmov_box box;
	dma_addr_t box_ptr;	/* CMD_PTR_LP | physical address of @box */
};

struct msm_tsif_device;

/* Per-slot transfer context; recovered via container_of(&hdr) in the
 * DM completion callback. */
struct tsif_xfer {
	struct msm_dmov_cmd hdr;
	struct msm_tsif_device *tsif_device;
	int busy;	/* non-zero while this slot is queued with the DM */
	int wi; /**< set devices's write index after xfer */
};
134
/* Per-instance driver state. */
struct msm_tsif_device {
	struct list_head devlist;
	struct platform_device *pdev;
	struct resource *memres;	/* register memory resource */
	void __iomem *base;		/* mapped TSIF registers */
	unsigned int irq;
	int mode;			/* 1, 2, or 3 = manual (debugfs) */
	u32 time_limit;
	/* signal-inversion flags, applied to STS_CTL in tsif_start_hw() */
	int clock_inverse;
	int data_inverse;
	int sync_inverse;
	int enable_inverse;
	enum tsif_state state;
	struct wake_lock wake_lock;
	/* clocks; any handle may be NULL if platform data omits it */
	struct clk *tsif_clk;
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* debugfs */
	struct dentry *dent_tsif;
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	struct dentry *debugfs_gpio;
	struct dentry *debugfs_action;
	struct dentry *debugfs_dma;
	struct dentry *debugfs_databuf;
	struct debugfs_blob_wrapper blob_wrapper_databuf;
	/* DMA related */
	int dma;			/* DM channel number */
	int crci;			/* DM flow-control (CRCI) id */
	void *data_buffer;		/* cyclic packet buffer (coherent) */
	dma_addr_t data_buffer_dma;
	u32 pkts_per_chunk;
	u32 chunks_per_buf;
	int ri;		/* read index: first packet ready for the client */
	int wi;		/* write index: next packet DM will complete */
	int dmwi;	/**< DataMover write index */
	struct tsif_dmov_cmd *dmov_cmd[2];
	dma_addr_t dmov_cmd_dma[2];
	struct tsif_xfer xfer[2];
	struct tasklet_struct dma_refill;	/* re-arms DMA after completion */
	struct tasklet_struct clocks_off;	/* clock-off outside IRQ context */
	/* statistics */
	u32 stat_rx;
	u32 stat_overflow;
	u32 stat_lost_sync;
	u32 stat_timeout;
	u32 stat_dmov_err;
	u32 stat_soft_drop;
	int stat_ifi; /* inter frame interval */
	u32 stat0, stat1;	/* last packet statuses (TSIF_DEBUG IFI calc) */
	/* client */
	void *client_data;
	void (*client_notify)(void *client_data);
};
189
190/* ===clocks begin=== */
191
192static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
193{
194 if (tsif_device->tsif_clk) {
195 clk_put(tsif_device->tsif_clk);
196 tsif_device->tsif_clk = NULL;
197 }
198 if (tsif_device->tsif_pclk) {
199 clk_put(tsif_device->tsif_pclk);
200 tsif_device->tsif_pclk = NULL;
201 }
202
203 if (tsif_device->tsif_ref_clk) {
204 clk_put(tsif_device->tsif_ref_clk);
205 tsif_device->tsif_ref_clk = NULL;
206 }
207}
208
209static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
210{
211 struct msm_tsif_platform_data *pdata =
212 tsif_device->pdev->dev.platform_data;
213 int rc = 0;
214
215 if (pdata->tsif_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700216 tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
217 pdata->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218 if (IS_ERR(tsif_device->tsif_clk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219 rc = PTR_ERR(tsif_device->tsif_clk);
220 tsif_device->tsif_clk = NULL;
221 goto ret;
222 }
223 }
224 if (pdata->tsif_pclk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700225 tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
226 pdata->tsif_pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227 if (IS_ERR(tsif_device->tsif_pclk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228 rc = PTR_ERR(tsif_device->tsif_pclk);
229 tsif_device->tsif_pclk = NULL;
230 goto ret;
231 }
232 }
233 if (pdata->tsif_ref_clk) {
Matt Wagantall640e5fd2011-08-17 16:08:53 -0700234 tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
235 pdata->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 if (IS_ERR(tsif_device->tsif_ref_clk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237 rc = PTR_ERR(tsif_device->tsif_ref_clk);
238 tsif_device->tsif_ref_clk = NULL;
239 goto ret;
240 }
241 }
242 return 0;
243ret:
244 tsif_put_clocks(tsif_device);
245 return rc;
246}
247
248static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
249{
250 if (on) {
251 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300252 clk_prepare_enable(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700253 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300254 clk_prepare_enable(tsif_device->tsif_pclk);
255 clk_prepare_enable(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256 } else {
257 if (tsif_device->tsif_clk)
Joel Nider6682b382012-07-03 13:59:27 +0300258 clk_disable_unprepare(tsif_device->tsif_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 if (tsif_device->tsif_pclk)
Joel Nider6682b382012-07-03 13:59:27 +0300260 clk_disable_unprepare(tsif_device->tsif_pclk);
261 clk_disable_unprepare(tsif_device->tsif_ref_clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 }
263}
Joel Nider6682b382012-07-03 13:59:27 +0300264
/* Tasklet body: turn the TSIF clocks off outside interrupt context. */
static void tsif_clocks_off(unsigned long data)
{
	tsif_clock((struct msm_tsif_device *)data, 0);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270/* ===clocks end=== */
271/* ===gpio begin=== */
272
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700273static int tsif_gpios_disable(const struct msm_gpio *table, int size)
274{
275 int rc = 0;
276 int i;
277 const struct msm_gpio *g;
278 for (i = size-1; i >= 0; i--) {
279 int tmp;
280 g = table + i;
Joel Nider951b2832012-05-07 21:13:38 +0300281 tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
282 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
283 GPIO_CFG_DISABLE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284 if (tmp) {
285 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
286 " <%s> failed: %d\n",
287 g->gpio_cfg, g->label ?: "?", rc);
288 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
289 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
290 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
291 GPIO_DRVSTR(g->gpio_cfg));
292 if (!rc)
293 rc = tmp;
294 }
295 }
296
297 return rc;
298}
299
300static int tsif_gpios_enable(const struct msm_gpio *table, int size)
301{
302 int rc;
303 int i;
304 const struct msm_gpio *g;
305 for (i = 0; i < size; i++) {
306 g = table + i;
307 rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
308 if (rc) {
309 pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
310 " <%s> failed: %d\n",
311 g->gpio_cfg, g->label ?: "?", rc);
312 pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
313 GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
314 GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
315 GPIO_DRVSTR(g->gpio_cfg));
316 goto err;
317 }
318 }
319 return 0;
320err:
321 tsif_gpios_disable(table, i);
322 return rc;
323}
324
/* Enable the GPIO table; wrapper kept for symmetry with
 * tsif_gpios_disable_free(). */
static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
{
	return tsif_gpios_enable(table, size);
}
331
/* Counterpart of tsif_gpios_request_enable(): disable the whole table. */
static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
{
	tsif_gpios_disable(table, size);
}
336
/* Enable all TSIF GPIOs listed in the platform data. */
static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
{
	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
}
343
/* Disable all TSIF GPIOs listed in the platform data. */
static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
{
	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
}
350
351/* ===gpio end=== */
352
353static int tsif_start_hw(struct msm_tsif_device *tsif_device)
354{
355 u32 ctl = TSIF_STS_CTL_EN_IRQ |
356 TSIF_STS_CTL_EN_TIME_LIM |
357 TSIF_STS_CTL_EN_TCR |
358 TSIF_STS_CTL_EN_DM;
Hamad Kadmany509b7662012-10-18 14:00:39 +0200359
360 if (tsif_device->clock_inverse)
361 ctl |= TSIF_STS_CTL_INV_CLOCK;
362
363 if (tsif_device->data_inverse)
364 ctl |= TSIF_STS_CTL_INV_DATA;
365
366 if (tsif_device->sync_inverse)
367 ctl |= TSIF_STS_CTL_INV_SYNC;
368
369 if (tsif_device->enable_inverse)
370 ctl |= TSIF_STS_CTL_INV_ENABLE;
371
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372 dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
373 switch (tsif_device->mode) {
374 case 1: /* mode 1 */
375 ctl |= (0 << 5);
376 break;
377 case 2: /* mode 2 */
378 ctl |= (1 << 5);
379 break;
380 case 3: /* manual - control from debugfs */
381 return 0;
382 break;
383 default:
384 return -EINVAL;
385 }
386 iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
387 iowrite32(tsif_device->time_limit,
388 tsif_device->base + TSIF_TIME_LIMIT_OFF);
389 wmb();
390 iowrite32(ctl | TSIF_STS_CTL_START,
391 tsif_device->base + TSIF_STS_CTL_OFF);
392 wmb();
393 ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
394 return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
395}
396
/* Assert the STOP bit to halt TSIF reception. */
static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
{
	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb();	/* make sure the stop reaches the hardware before returning */
}
402
403/* ===DMA begin=== */
404/**
405 * TSIF DMA theory of operation
406 *
407 * Circular memory buffer \a tsif_mem_buffer allocated;
408 * 4 pointers points to and moved forward on:
409 * - \a ri index of first ready to read packet.
410 * Updated by client's call to tsif_reclaim_packets()
411 * - \a wi points to the next packet to be written by DM.
 * Data below is valid and will not be overridden by DMA.
413 * Moved on DM callback
414 * - \a dmwi points to the next packet not scheduled yet for DM
415 * moved when packet scheduled for DM
416 *
417 * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
418 * at time immediately after scheduling.
419 *
420 * Initially, 2 packets get scheduled for the DM.
421 *
422 * Upon packet receive, DM writes packet to the pre-programmed
423 * location and invoke its callback.
424 *
 * The DM callback sets the wi pointer to \a xfer->wi;
426 * then it schedules next packet for DM and moves \a dmwi pointer.
427 *
428 * Buffer overflow handling
429 *
430 * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
431 * DMA re-scheduled to the same index.
432 * Callback check and not move \a wi to become equal to \a ri
433 *
434 * On \a read request, data between \a ri and \a wi pointers may be read;
 * \a ri pointer moved accordingly.
436 *
437 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
438 * \a wi is between [\a ri, \a dmwi]
439 *
440 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
441 *
442 * Number of scheduled packets for DM: (dmwi-wi)
443 */
444
/**
 * tsif_dma_schedule - schedule DMA transfers
 *
 * @tsif_device: device
 *
 * Executed from process context on init, or from tasklet when
 * re-scheduling upon DMA completion.
 * This prevents concurrent execution from several CPUs.
 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* aim the box destination at the next free chunk */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}
497
/**
 * tsif_dmov_complete_func - DataMover completion callback
 *
 * @cmd: original DM command
 * @result: DM result
 * @err: optional error buffer
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 *
 * On success, advances the device write index and counts soft drops;
 * on error, invalidates the chunk, stops the hardware and schedules
 * the clocks-off tasklet; on flush, moves the device toward the
 * stopped state once both xfer slots are idle.
 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;	/* which of the 2 slots completed */
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;

	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * This branch is taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happen for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 * Clocks are turned off from outside the
				 * interrupt context.
				 */
				tasklet_schedule(&tsif_device->clocks_off);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}
635
636/**
637 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
638 *
639 * @data: tsif_device
640 *
641 * Reschedule DMA requests
642 *
643 * Executed in tasklet
644 */
645static void tsif_dma_refill(unsigned long data)
646{
647 struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
648 if (tsif_device->state == tsif_state_running)
649 tsif_dma_schedule(tsif_device);
650}
651
/**
 * tsif_dma_flush - flush DMA channel
 *
 * @tsif_device: device
 *
 * Busy-waits (sleeping 10 ms per iteration) until both outstanding
 * DMA transfers are flushed, then marks the device stopped and
 * notifies the client.
 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			/* completion callback clears the busy flags */
			msm_dmov_flush(tsif_device->dma, 1);
			usleep(10000);
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}
673
/* Stop refills, flush outstanding DMA, and free all coherent DMA
 * memory (DM commands and the cyclic data buffer).  Safe to call on a
 * partially initialized device — each resource is checked first. */
static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
{
	int i;
	tsif_device->state = tsif_state_flushing;
	tasklet_kill(&tsif_device->dma_refill);
	tsif_dma_flush(tsif_device);
	for (i = 0; i < 2; i++) {
		if (tsif_device->dmov_cmd[i]) {
			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
					  tsif_device->dmov_cmd[i],
					  tsif_device->dmov_cmd_dma[i]);
			tsif_device->dmov_cmd[i] = NULL;
		}
	}
	if (tsif_device->data_buffer) {
		/* detach the debugfs blob before freeing its backing store */
		tsif_device->blob_wrapper_databuf.data = NULL;
		tsif_device->blob_wrapper_databuf.size = 0;
		dma_free_coherent(NULL, TSIF_BUF_SIZE,
				  tsif_device->data_buffer,
				  tsif_device->data_buffer_dma);
		tsif_device->data_buffer = NULL;
	}
}
697
/*
 * tsif_dma_init - allocate DMA buffers and pre-build DM commands
 *
 * Allocates the cyclic data buffer and, for each of the 2 xfer slots,
 * a coherent tsif_dmov_cmd; programs each DM box command to move
 * TSIF_PKTS_IN_CHUNK packets from the TSIF data port into the buffer.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (partial
 * allocations are released via tsif_dma_exit()).
 */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device,
	   it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		box->src_row_addr =
			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
		/* source is a FIFO: do not advance the src row */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;

		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	msm_dmov_flush(tsif_device->dma, 1);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
752
753/* ===DMA end=== */
754
755/* ===IRQ begin=== */
756
757static irqreturn_t tsif_irq(int irq, void *dev_id)
758{
759 struct msm_tsif_device *tsif_device = dev_id;
760 u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
761 if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
762 TSIF_STS_CTL_OVERFLOW |
763 TSIF_STS_CTL_LOST_SYNC |
764 TSIF_STS_CTL_TIMEOUT))) {
765 dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
766 return IRQ_NONE;
767 }
768 if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
769 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
770 tsif_device->stat_rx++;
771 }
772 if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
773 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
774 tsif_device->stat_overflow++;
775 }
776 if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
777 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
778 tsif_device->stat_lost_sync++;
779 }
780 if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
781 dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
782 tsif_device->stat_timeout++;
783 }
784 iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
785 wmb();
786 return IRQ_HANDLED;
787}
788
789/* ===IRQ end=== */
790
791/* ===Device attributes begin=== */
792
/* sysfs read handler: dump device configuration, statistics, and a few
 * clock-controller registers for debugging. */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"Clock inv = %d\n"
			"Data inv = %d\n"
			"Sync inv = %d\n"
			"Enable inv = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->clock_inverse,
			tsif_device->data_inverse,
			tsif_device->sync_inverse,
			tsif_device->enable_inverse,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
/**
 * set_stats - reset statistics on write
 *
 * @dev: device
 * @attr: device attribute (unused)
 * @buf: written data (contents ignored; any write resets)
 * @count: number of bytes written
 *
 * Return: @count — the write is always accepted.
 */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	tsif_device->stat_rx = 0;
	tsif_device->stat_overflow = 0;
	tsif_device->stat_lost_sync = 0;
	tsif_device->stat_timeout = 0;
	tsif_device->stat_dmov_err = 0;
	tsif_device->stat_soft_drop = 0;
	tsif_device->stat_ifi = 0;
	return count;
}
static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
888
889static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
890 char *buf)
891{
892 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
893 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
894}
895
896static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
897 const char *buf, size_t count)
898{
899 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
900 int value;
901 int rc;
902 if (1 != sscanf(buf, "%d", &value)) {
903 dev_err(&tsif_device->pdev->dev,
904 "Failed to parse integer: <%s>\n", buf);
905 return -EINVAL;
906 }
907 rc = tsif_set_mode(tsif_device, value);
908 if (!rc)
909 rc = count;
910 return rc;
911}
912static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
913
914static ssize_t show_time_limit(struct device *dev,
915 struct device_attribute *attr,
916 char *buf)
917{
918 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
919 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
920}
921
922static ssize_t set_time_limit(struct device *dev,
923 struct device_attribute *attr,
924 const char *buf, size_t count)
925{
926 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
927 int value;
928 int rc;
929 if (1 != sscanf(buf, "%d", &value)) {
930 dev_err(&tsif_device->pdev->dev,
931 "Failed to parse integer: <%s>\n", buf);
932 return -EINVAL;
933 }
934 rc = tsif_set_time_limit(tsif_device, value);
935 if (!rc)
936 rc = count;
937 return rc;
938}
939static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
940 show_time_limit, set_time_limit);
941
942static ssize_t show_buf_config(struct device *dev,
943 struct device_attribute *attr,
944 char *buf)
945{
946 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
947 return snprintf(buf, PAGE_SIZE, "%d * %d\n",
948 tsif_device->pkts_per_chunk,
949 tsif_device->chunks_per_buf);
950}
951
952static ssize_t set_buf_config(struct device *dev,
953 struct device_attribute *attr,
954 const char *buf, size_t count)
955{
956 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
957 u32 p, c;
958 int rc;
959 if (2 != sscanf(buf, "%d * %d", &p, &c)) {
960 dev_err(&tsif_device->pdev->dev,
961 "Failed to parse integer: <%s>\n", buf);
962 return -EINVAL;
963 }
964 rc = tsif_set_buf_config(tsif_device, p, c);
965 if (!rc)
966 rc = count;
967 return rc;
968}
969static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
970 show_buf_config, set_buf_config);
971
Hamad Kadmany509b7662012-10-18 14:00:39 +0200972static ssize_t show_clk_inverse(struct device *dev,
973 struct device_attribute *attr, char *buf)
974{
975 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
976 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->clock_inverse);
977}
978
979static ssize_t set_clk_inverse(struct device *dev,
980 struct device_attribute *attr, const char *buf, size_t count)
981{
982 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
983 int value;
984 int rc;
985 if (1 != sscanf(buf, "%d", &value)) {
986 dev_err(&tsif_device->pdev->dev,
987 "Failed to parse integer: <%s>\n", buf);
988 return -EINVAL;
989 }
990 rc = tsif_set_clk_inverse(tsif_device, value);
991 if (!rc)
992 rc = count;
993 return rc;
994}
995static DEVICE_ATTR(clk_inverse, S_IRUGO | S_IWUSR,
996 show_clk_inverse, set_clk_inverse);
997
998static ssize_t show_data_inverse(struct device *dev,
999 struct device_attribute *attr, char *buf)
1000{
1001 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1002 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->data_inverse);
1003}
1004
1005static ssize_t set_data_inverse(struct device *dev,
1006 struct device_attribute *attr, const char *buf, size_t count)
1007{
1008 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1009 int value;
1010 int rc;
1011 if (1 != sscanf(buf, "%d", &value)) {
1012 dev_err(&tsif_device->pdev->dev,
1013 "Failed to parse integer: <%s>\n", buf);
1014 return -EINVAL;
1015 }
1016 rc = tsif_set_data_inverse(tsif_device, value);
1017 if (!rc)
1018 rc = count;
1019 return rc;
1020}
1021static DEVICE_ATTR(data_inverse, S_IRUGO | S_IWUSR,
1022 show_data_inverse, set_data_inverse);
1023
1024static ssize_t show_sync_inverse(struct device *dev,
1025 struct device_attribute *attr, char *buf)
1026{
1027 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1028 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->sync_inverse);
1029}
1030
1031static ssize_t set_sync_inverse(struct device *dev,
1032 struct device_attribute *attr, const char *buf, size_t count)
1033{
1034 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1035 int value;
1036 int rc;
1037 if (1 != sscanf(buf, "%d", &value)) {
1038 dev_err(&tsif_device->pdev->dev,
1039 "Failed to parse integer: <%s>\n", buf);
1040 return -EINVAL;
1041 }
1042 rc = tsif_set_sync_inverse(tsif_device, value);
1043 if (!rc)
1044 rc = count;
1045 return rc;
1046}
1047static DEVICE_ATTR(sync_inverse, S_IRUGO | S_IWUSR,
1048 show_sync_inverse, set_sync_inverse);
1049
1050static ssize_t show_enable_inverse(struct device *dev,
1051 struct device_attribute *attr, char *buf)
1052{
1053 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1054 return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->enable_inverse);
1055}
1056
1057static ssize_t set_enable_inverse(struct device *dev,
1058 struct device_attribute *attr, const char *buf, size_t count)
1059{
1060 struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
1061 int value;
1062 int rc;
1063 if (1 != sscanf(buf, "%d", &value)) {
1064 dev_err(&tsif_device->pdev->dev,
1065 "Failed to parse integer: <%s>\n", buf);
1066 return -EINVAL;
1067 }
1068 rc = tsif_set_enable_inverse(tsif_device, value);
1069 if (!rc)
1070 rc = count;
1071 return rc;
1072}
1073static DEVICE_ATTR(enable_inverse, S_IRUGO | S_IWUSR,
1074 show_enable_inverse, set_enable_inverse);
1075
1076
/* sysfs attributes exported per TSIF device (see sysfs_create_group) */
static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	&dev_attr_clk_inverse.attr,
	&dev_attr_data_inverse.attr,
	&dev_attr_sync_inverse.attr,
	&dev_attr_enable_inverse.attr,
	NULL,	/* sentinel terminating the attribute list */
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
1091/* ===Device attributes end=== */
1092
1093/* ===debugfs begin=== */
1094
/*
 * debugfs write callback: store a 32-bit value into the ioremapped TSIF
 * register whose address is carried in @data.
 */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb();	/* make sure the register write is posted before returning */
	return 0;
}
1101
/*
 * debugfs read callback: fetch the current value of the ioremapped TSIF
 * register whose address is carried in @data.
 */
static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}
1107
/* "0x%08llx" formatting for the register files created below */
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

/*
 * Create one debugfs file exposing a single 32-bit ioremapped TSIF
 * register for read/write through fops_iomem_x32.
 */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}
1116
/*
 * Start a TSIF session: init and schedule DMA, enable IRQ and clocks,
 * start the HW core and GPIOs, then take runtime-PM and wakelock
 * references. The bring-up order is deliberate (see inline comments);
 * each failure step unwinds exactly what was set up before it.
 *
 * Returns 0 on success, -EAGAIN if not currently stopped, or the error
 * from the failing step.
 */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;
	int result;

	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;

	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	tsif_device->state = tsif_state_running;

	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		/* unwind: DMA, clocks and IRQ were already enabled */
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return rc;
	}

	/* make sure the GPIO's are set up */
	rc = tsif_start_gpios(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
		tsif_stop_hw(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return rc;
	}

	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		/* full unwind, in reverse order of bring-up */
		tsif_stop_gpios(tsif_device);
		tsif_stop_hw(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return result;
	}

	/* keep the system awake while streaming is active */
	wake_lock(&tsif_device->wake_lock);
	return 0;
}
1183
/*
 * Stop a running TSIF session: quiesce the GPIO inputs, wait for the
 * Data Mover to drain, then stop HW/DMA/clocks/IRQ and drop the
 * runtime-PM and wakelock references taken in action_open().
 */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);

	/* turn off the GPIO's to prevent new data from entering */
	tsif_stop_gpios(tsif_device);

	/* we unfortunately must sleep here to give the ADM time to
	 * complete any outstanding reads after the GPIO's are turned
	 * off. There is no indication from the ADM hardware that
	 * there are any outstanding reads on the bus, and if we
	 * stop the TSIF too quickly, it can cause a bus error.
	 */
	msleep(250);

	/* now we can stop the core */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);

	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}
1210
1211
/* mapping of debugfs "action" command strings to their handlers */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};
1219
/*
 * debugfs "action" write handler: accept "open" or "close" (see
 * actions[]) and invoke the matching state-transition handler.
 *
 * Returns @count on success, the handler's error on failure, or
 * -EINVAL for an unrecognized command.
 */
static ssize_t tsif_debugfs_action_write(struct file *filp,
					 const char __user *userbuf,
					 size_t count, loff_t *f_pos)
{
	int i;
	struct msm_tsif_device *tsif_device = filp->private_data;
	char s[40];
	int len = min(sizeof(s) - 1, count);	/* clamp to local buffer */
	if (copy_from_user(s, userbuf, len))
		return -EFAULT;
	s[len] = '\0';
	dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
	for (i = 0; i < ARRAY_SIZE(actions); i++) {
		/*
		 * compare only over the command name's length so a
		 * trailing newline from "echo" still matches
		 */
		if (!strncmp(s, actions[i].name,
			     min(count, strlen(actions[i].name)))) {
			int rc = actions[i].func(tsif_device);
			if (!rc)
				rc = count;
			return rc;
		}
	}
	return -EINVAL;
}
1243
/*
 * Shared debugfs open: stash the msm_tsif_device pointer (passed as
 * i_private at file creation) in filp->private_data for the handlers.
 */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

/* write-only "action" control file */
static const struct file_operations fops_debugfs_action = {
	.open  = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};
1254
1255static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
1256 size_t count, loff_t *f_pos)
1257{
1258 static char bufa[200];
1259 static char *buf = bufa;
1260 int sz = sizeof(bufa);
1261 struct msm_tsif_device *tsif_device = filp->private_data;
1262 int len = 0;
1263 if (tsif_device) {
1264 int i;
1265 len += snprintf(buf + len, sz - len,
1266 "ri %3d | wi %3d | dmwi %3d |",
1267 tsif_device->ri, tsif_device->wi,
1268 tsif_device->dmwi);
1269 for (i = 0; i < 2; i++) {
1270 struct tsif_xfer *xfer = &tsif_device->xfer[i];
1271 if (xfer->busy) {
1272 u32 dst =
1273 tsif_device->dmov_cmd[i]->box.dst_row_addr;
1274 u32 base = tsif_device->data_buffer_dma;
1275 int w = (dst - base) / TSIF_PKT_SIZE;
1276 len += snprintf(buf + len, sz - len,
1277 " [%3d]{%3d}",
1278 w, xfer->wi);
1279 } else {
1280 len += snprintf(buf + len, sz - len,
1281 " ---idle---");
1282 }
1283 }
1284 len += snprintf(buf + len, sz - len, "\n");
1285 } else {
1286 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1287 }
1288 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1289}
1290
/* read-only "dma" status file */
static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};
1295
1296static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
1297 size_t count, loff_t *f_pos)
1298{
1299 static char bufa[300];
1300 static char *buf = bufa;
1301 int sz = sizeof(bufa);
1302 struct msm_tsif_device *tsif_device = filp->private_data;
1303 int len = 0;
1304 if (tsif_device) {
1305 struct msm_tsif_platform_data *pdata =
1306 tsif_device->pdev->dev.platform_data;
1307 int i;
1308 for (i = 0; i < pdata->num_gpios; i++) {
1309 if (pdata->gpios[i].gpio_cfg) {
1310 int x = !!gpio_get_value(GPIO_PIN(
1311 pdata->gpios[i].gpio_cfg));
1312 len += snprintf(buf + len, sz - len,
1313 "%15s: %d\n",
1314 pdata->gpios[i].label, x);
1315 }
1316 }
1317 } else {
1318 len += snprintf(buf + len, sz - len, "No TSIF device???\n");
1319 }
1320 return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
1321}
1322
/* read-only "gpios" status file */
static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};
1327
1328
/*
 * Build the per-device debugfs tree: one x32 file per TSIF register
 * (from debugfs_tsif_regs[]), plus "gpios", "action", "dma" files and
 * a blob view of the data buffer. debugfs failure is non-fatal — the
 * driver operates normally without it.
 */
static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
{
	tsif_device->dent_tsif = debugfs_create_dir(
		dev_name(&tsif_device->pdev->dev), NULL);
	if (tsif_device->dent_tsif) {
		int i;
		void __iomem *base = tsif_device->base;
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
			tsif_device->debugfs_tsif_regs[i] =
				debugfs_create_iomem_x32(
					debugfs_tsif_regs[i].name,
					debugfs_tsif_regs[i].mode,
					tsif_device->dent_tsif,
					base + debugfs_tsif_regs[i].offset);
		}
		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device,
			&fops_debugfs_gpios);
		tsif_device->debugfs_action = debugfs_create_file("action",
			S_IWUSR,
			tsif_device->dent_tsif, tsif_device,
			&fops_debugfs_action);
		tsif_device->debugfs_dma = debugfs_create_file("dma",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device,
			&fops_debugfs_dma);
		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
			S_IRUGO,
			tsif_device->dent_tsif,
			&tsif_device->blob_wrapper_databuf);
	}
}
1358
1359static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
1360{
1361 if (tsif_device->dent_tsif) {
1362 int i;
1363 debugfs_remove_recursive(tsif_device->dent_tsif);
1364 tsif_device->dent_tsif = NULL;
1365 for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
1366 tsif_device->debugfs_tsif_regs[i] = NULL;
1367 tsif_device->debugfs_gpio = NULL;
1368 tsif_device->debugfs_action = NULL;
1369 tsif_device->debugfs_dma = NULL;
1370 tsif_device->debugfs_databuf = NULL;
1371 }
1372}
1373/* ===debugfs end=== */
1374
1375/* ===module begin=== */
1376static LIST_HEAD(tsif_devices);
1377
1378static struct msm_tsif_device *tsif_find_by_id(int id)
1379{
1380 struct msm_tsif_device *tsif_device;
1381 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1382 if (tsif_device->pdev->id == id)
1383 return tsif_device;
1384 }
1385 return NULL;
1386}
1387
1388static int __devinit msm_tsif_probe(struct platform_device *pdev)
1389{
1390 int rc = -ENODEV;
1391 struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
1392 struct msm_tsif_device *tsif_device;
1393 struct resource *res;
1394 /* check device validity */
1395 /* must have platform data */
1396 if (!plat) {
1397 dev_err(&pdev->dev, "Platform data not available\n");
1398 rc = -EINVAL;
1399 goto out;
1400 }
Joel Nider5578bdb2011-08-12 09:37:11 +03001401
1402 if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403 dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
1404 rc = -EINVAL;
1405 goto out;
1406 }
1407 /* OK, we will use this device */
1408 tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
1409 if (!tsif_device) {
1410 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
1411 rc = -ENOMEM;
1412 goto out;
1413 }
1414 /* cross links */
1415 tsif_device->pdev = pdev;
1416 platform_set_drvdata(pdev, tsif_device);
1417 tsif_device->mode = 1;
Hamad Kadmany509b7662012-10-18 14:00:39 +02001418 tsif_device->clock_inverse = 0;
1419 tsif_device->data_inverse = 0;
1420 tsif_device->sync_inverse = 0;
1421 tsif_device->enable_inverse = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001422 tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
1423 tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
1424 tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
1425 (unsigned long)tsif_device);
Joel Nider6682b382012-07-03 13:59:27 +03001426 tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
1427 (unsigned long)tsif_device);
Liron Kuchfd76775c2013-04-03 15:32:15 +03001428 rc = tsif_get_clocks(tsif_device);
1429 if (rc)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001430 goto err_clocks;
1431/* map I/O memory */
1432 tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1433 if (!tsif_device->memres) {
1434 dev_err(&pdev->dev, "Missing MEM resource\n");
1435 rc = -ENXIO;
1436 goto err_rgn;
1437 }
1438 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1439 if (!res) {
1440 dev_err(&pdev->dev, "Missing DMA resource\n");
1441 rc = -ENXIO;
1442 goto err_rgn;
1443 }
1444 tsif_device->dma = res->start;
1445 tsif_device->crci = res->end;
1446 tsif_device->base = ioremap(tsif_device->memres->start,
1447 resource_size(tsif_device->memres));
1448 if (!tsif_device->base) {
1449 dev_err(&pdev->dev, "ioremap failed\n");
1450 goto err_ioremap;
1451 }
1452 dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
1453 tsif_device->memres->start, tsif_device->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454
1455 pm_runtime_set_active(&pdev->dev);
1456 pm_runtime_enable(&pdev->dev);
1457
1458 tsif_debugfs_init(tsif_device);
1459 rc = platform_get_irq(pdev, 0);
1460 if (rc > 0) {
1461 tsif_device->irq = rc;
1462 rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
1463 dev_name(&pdev->dev), tsif_device);
1464 disable_irq(tsif_device->irq);
1465 }
1466 if (rc) {
1467 dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
1468 tsif_device->irq, rc);
1469 goto err_irq;
1470 }
1471 rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
1472 if (rc) {
1473 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
1474 goto err_attrs;
1475 }
1476 wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
1477 dev_name(&pdev->dev));
1478 dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
1479 tsif_device->irq, tsif_device->memres->start,
1480 tsif_device->dma, tsif_device->crci);
1481 list_add(&tsif_device->devlist, &tsif_devices);
1482 return 0;
1483/* error path */
1484 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
1485err_attrs:
1486 free_irq(tsif_device->irq, tsif_device);
1487err_irq:
1488 tsif_debugfs_exit(tsif_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001489 iounmap(tsif_device->base);
1490err_ioremap:
1491err_rgn:
1492 tsif_put_clocks(tsif_device);
1493err_clocks:
1494 kfree(tsif_device);
1495out:
1496 return rc;
1497}
1498
/*
 * Platform driver remove: unregister the device and tear down
 * everything msm_tsif_probe() set up, roughly in reverse order.
 */
static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}
1518
/* runtime-PM suspend hook: nothing to do beyond logging */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

/* runtime-PM resume hook: nothing to do beyond logging */
static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume  = tsif_runtime_resume,
};
1535
1536
1537static struct platform_driver msm_tsif_driver = {
1538 .probe = msm_tsif_probe,
1539 .remove = __exit_p(msm_tsif_remove),
1540 .driver = {
1541 .name = "msm_tsif",
1542 .pm = &tsif_dev_pm_ops,
1543 },
1544};
1545
/* module entry: register the platform driver */
static int __init mod_init(void)
{
	int rc = platform_driver_register(&msm_tsif_driver);
	if (rc)
		pr_err("TSIF: platform_driver_register failed: %d\n", rc);
	return rc;
}

/* module exit: unregister the platform driver */
static void __exit mod_exit(void)
{
	platform_driver_unregister(&msm_tsif_driver);
}
1558/* ===module end=== */
1559
1560/* public API */
1561
Joel Nider5578bdb2011-08-12 09:37:11 +03001562int tsif_get_active(void)
1563{
1564 struct msm_tsif_device *tsif_device;
1565 list_for_each_entry(tsif_device, &tsif_devices, devlist) {
1566 return tsif_device->pdev->id;
1567 }
1568 return -ENODEV;
1569}
1570EXPORT_SYMBOL(tsif_get_active);
1571
/*
 * Attach a single client to TSIF device @id. @notify is invoked with
 * @data on data events. Returns an opaque cookie for the other public
 * API calls, ERR_PTR(-ENODEV) for an unknown id, or ERR_PTR(-EBUSY)
 * if a client is already attached. Takes a device reference to pin
 * the driver while attached.
 */
void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
{
	struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
	if (!tsif_device)
		return ERR_PTR(-ENODEV);
	if (tsif_device->client_notify || tsif_device->client_data)
		return ERR_PTR(-EBUSY);
	tsif_device->client_notify = notify;
	tsif_device->client_data = data;
	/* prevent from unloading */
	get_device(&tsif_device->pdev->dev);
	return tsif_device;
}
EXPORT_SYMBOL(tsif_attach);
1586
/* Detach the current client and drop the reference taken in tsif_attach() */
void tsif_detach(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->client_notify = NULL;
	tsif_device->client_data = NULL;
	put_device(&tsif_device->pdev->dev);
}
EXPORT_SYMBOL(tsif_detach);
1595
1596void tsif_get_info(void *cookie, void **pdata, int *psize)
1597{
1598 struct msm_tsif_device *tsif_device = cookie;
1599 if (pdata)
1600 *pdata = tsif_device->data_buffer;
1601 if (psize)
1602 *psize = TSIF_PKTS_IN_BUF;
1603}
1604EXPORT_SYMBOL(tsif_get_info);
1605
1606int tsif_set_mode(void *cookie, int mode)
1607{
1608 struct msm_tsif_device *tsif_device = cookie;
1609 if (tsif_device->state != tsif_state_stopped) {
1610 dev_err(&tsif_device->pdev->dev,
1611 "Can't change mode while device is active\n");
1612 return -EBUSY;
1613 }
1614 switch (mode) {
1615 case 1:
1616 case 2:
1617 case 3:
1618 tsif_device->mode = mode;
1619 break;
1620 default:
1621 dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
1622 return -EINVAL;
1623 }
1624 return 0;
1625}
1626EXPORT_SYMBOL(tsif_set_mode);
1627
1628int tsif_set_time_limit(void *cookie, u32 value)
1629{
1630 struct msm_tsif_device *tsif_device = cookie;
1631 if (tsif_device->state != tsif_state_stopped) {
1632 dev_err(&tsif_device->pdev->dev,
1633 "Can't change time limit while device is active\n");
1634 return -EBUSY;
1635 }
1636 if (value != (value & 0xFFFFFF)) {
1637 dev_err(&tsif_device->pdev->dev,
1638 "Invalid time limit (should be 24 bit): %#x\n", value);
1639 return -EINVAL;
1640 }
1641 tsif_device->time_limit = value;
1642 return 0;
1643}
1644EXPORT_SYMBOL(tsif_set_time_limit);
1645
1646int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
1647{
1648 struct msm_tsif_device *tsif_device = cookie;
1649 if (tsif_device->data_buffer) {
1650 dev_err(&tsif_device->pdev->dev,
1651 "Data buffer already allocated: %p\n",
1652 tsif_device->data_buffer);
1653 return -EBUSY;
1654 }
1655 /* check for crazy user */
1656 if (pkts_in_chunk * chunks_in_buf > 10240) {
1657 dev_err(&tsif_device->pdev->dev,
1658 "Buffer requested is too large: %d * %d\n",
1659 pkts_in_chunk,
1660 chunks_in_buf);
1661 return -EINVAL;
1662 }
1663 /* parameters are OK, execute */
1664 tsif_device->pkts_per_chunk = pkts_in_chunk;
1665 tsif_device->chunks_per_buf = chunks_in_buf;
1666 return 0;
1667}
1668EXPORT_SYMBOL(tsif_set_buf_config);
1669
Hamad Kadmany509b7662012-10-18 14:00:39 +02001670int tsif_set_clk_inverse(void *cookie, int value)
1671{
1672 struct msm_tsif_device *tsif_device = cookie;
1673 if (tsif_device->state != tsif_state_stopped) {
1674 dev_err(&tsif_device->pdev->dev,
1675 "Can't change clock inverse while device is active\n");
1676 return -EBUSY;
1677 }
1678 if ((value != 0) && (value != 1)) {
1679 dev_err(&tsif_device->pdev->dev,
1680 "Invalid parameter, either 0 or 1: %#x\n", value);
1681 return -EINVAL;
1682 }
1683 tsif_device->clock_inverse = value;
1684 return 0;
1685}
1686EXPORT_SYMBOL(tsif_set_clk_inverse);
1687
1688int tsif_set_data_inverse(void *cookie, int value)
1689{
1690 struct msm_tsif_device *tsif_device = cookie;
1691 if (tsif_device->state != tsif_state_stopped) {
1692 dev_err(&tsif_device->pdev->dev,
1693 "Can't change data inverse while device is active\n");
1694 return -EBUSY;
1695 }
1696 if ((value != 0) && (value != 1)) {
1697 dev_err(&tsif_device->pdev->dev,
1698 "Invalid parameter, either 0 or 1: %#x\n", value);
1699 return -EINVAL;
1700 }
1701 tsif_device->data_inverse = value;
1702 return 0;
1703}
1704EXPORT_SYMBOL(tsif_set_data_inverse);
1705
1706int tsif_set_sync_inverse(void *cookie, int value)
1707{
1708 struct msm_tsif_device *tsif_device = cookie;
1709 if (tsif_device->state != tsif_state_stopped) {
1710 dev_err(&tsif_device->pdev->dev,
1711 "Can't change sync inverse while device is active\n");
1712 return -EBUSY;
1713 }
1714 if ((value != 0) && (value != 1)) {
1715 dev_err(&tsif_device->pdev->dev,
1716 "Invalid parameter, either 0 or 1: %#x\n", value);
1717 return -EINVAL;
1718 }
1719 tsif_device->sync_inverse = value;
1720 return 0;
1721}
1722EXPORT_SYMBOL(tsif_set_sync_inverse);
1723
1724int tsif_set_enable_inverse(void *cookie, int value)
1725{
1726 struct msm_tsif_device *tsif_device = cookie;
1727 if (tsif_device->state != tsif_state_stopped) {
1728 dev_err(&tsif_device->pdev->dev,
1729 "Can't change enable inverse while device is active\n");
1730 return -EBUSY;
1731 }
1732 if ((value != 0) && (value != 1)) {
1733 dev_err(&tsif_device->pdev->dev,
1734 "Invalid parameter, either 0 or 1: %#x\n", value);
1735 return -EINVAL;
1736 }
1737 tsif_device->enable_inverse = value;
1738 return 0;
1739}
1740EXPORT_SYMBOL(tsif_set_enable_inverse);
1741
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001742void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
1743{
1744 struct msm_tsif_device *tsif_device = cookie;
1745 if (ri)
1746 *ri = tsif_device->ri;
1747 if (wi)
1748 *wi = tsif_device->wi;
1749 if (state)
1750 *state = tsif_device->state;
1751}
1752EXPORT_SYMBOL(tsif_get_state);
1753
/* Public API: start streaming on the attached device (see action_open) */
int tsif_start(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	return action_open(tsif_device);
}
EXPORT_SYMBOL(tsif_start);
1760
/* Public API: stop streaming on the attached device (see action_close) */
void tsif_stop(void *cookie)
{
	struct msm_tsif_device *tsif_device = cookie;
	action_close(tsif_device);
}
EXPORT_SYMBOL(tsif_stop);
1767
Hamad Kadmany68bd27a2013-01-31 14:53:32 +02001768int tsif_get_ref_clk_counter(void *cookie, u32 *tcr_counter)
1769{
1770 struct msm_tsif_device *tsif_device = cookie;
1771
1772 if (!tsif_device || !tcr_counter)
1773 return -EINVAL;
1774
1775 if (tsif_device->state == tsif_state_running)
1776 *tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
1777 else
1778 *tcr_counter = 0;
1779
1780 return 0;
1781}
1782EXPORT_SYMBOL(tsif_get_ref_clk_counter);
1783
/*
 * Public API: the client advances the read index to @read_index after
 * consuming packets, freeing that buffer space for the DMA producer.
 */
void tsif_reclaim_packets(void *cookie, int read_index)
{
	struct msm_tsif_device *tsif_device = cookie;
	tsif_device->ri = read_index;
}
EXPORT_SYMBOL(tsif_reclaim_packets);
1790
/* module entry/exit registration and metadata */
module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
		   " Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");
1797