/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA	0
#define BAM_MUX_HDR_CMD_OPEN	1
#define BAM_MUX_HDR_CMD_CLOSE	2

#define RX_STATE_HDR_QUEUED	0
#define RX_STATE_DATA_QUEUED	1


static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n",	\
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct delayed_work bam_init_work;
static struct sps_bam_props a2_props;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_lock);

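/*
 * On-the-wire multiplexing header.  Every packet exchanged with the A2
 * starts with this header: magic_num identifies a valid frame, cmd is one
 * of the BAM_MUX_HDR_CMD_* values, ch_id selects the logical channel, and
 * pkt_len/pad_len describe the payload and the alignment padding that
 * follows it.
 */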
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DEFINE_MUTEX(bam_mux_lock);
static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)						\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)					\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

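/*
 * queue_rx() - replenish one receive buffer
 *
 * Allocates an rx_pkt_info plus a BUFFER_SIZE skb, tracks it on
 * bam_rx_pool, maps it for DMA, and hands the descriptor to the BAM rx
 * pipe so the A2 can fill it.
 */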
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info)
		return; /* need a better way to handle this */

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	if (!info->skb) {
		kfree(info);
		return; /* need a better way to handle this */
	}
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_lock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_lock);

	/* need a way to handle the error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
					   DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
			 BUFFER_SIZE, info, 0);
}

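/*
 * bam_mux_process_data() - deliver a received data packet
 *
 * Strips the mux header off the skb, then delivers it to the channel's
 * notify callback as a BAM_DMUX_RECEIVE event (or frees it if no callback
 * is registered) and queues a replacement rx buffer.
 */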
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

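/*
 * handle_bam_mux_cmd() - process one completed rx buffer
 *
 * Validates the mux header, then dispatches on the command: data packets
 * go to bam_mux_process_data(), OPEN/CLOSE commands update the remote
 * open state of the channel, and invalid headers are dropped.  In every
 * case a replacement rx buffer is queued.
 */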
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

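/*
 * bam_mux_write_cmd() - send a command-only mux header to the A2
 *
 * Maps the header for DMA and submits it on the tx pipe with is_cmd set,
 * so that bam_mux_tx_notify() frees the buffer with kfree() instead of
 * handing it to a client callback.
 */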
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;

	mutex_lock(&bam_mux_lock);
	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);

	mutex_unlock(&bam_mux_lock);
	return rc;
}

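/*
 * bam_mux_write_done() - tx completion work for data packets
 *
 * Runs on the tx workqueue after the BAM has consumed a data skb and
 * reports BAM_DMUX_WRITE_DONE to the owning channel's notify callback
 * (or frees the skb if no callback is registered).
 */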
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	unsigned long event_data;

	info = container_of(work, struct tx_pkt_info, work);
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->data_len);
	event_data = (unsigned long)(skb);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}

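/*
 * msm_bam_dmux_write() - queue an skb for transmission on a mux channel
 *
 * The channel must be open locally and remotely.  A mux header is pushed
 * onto the skb and the packet is padded out to a 4-byte boundary (copying
 * into a larger skb if there is not enough tailroom), then the buffer is
 * DMA-mapped and submitted on the BAM tx pipe.  Ownership of the skb is
 * returned to the caller through the BAM_DMUX_WRITE_DONE event.
 */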
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/* if the skb does not have enough tailroom for padding,
	 * copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy are probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* the caller should allocate room for the hdr and padding;
	 * the hdr is fine, the padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		return -ENOMEM;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	return rc;
}

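/*
 * msm_bam_dmux_open() - open a mux channel from the local side
 *
 * Registers the notify callback and private data for the channel, marks
 * it locally open and sends an OPEN command to the A2.  The remote side
 * must already have opened the channel, otherwise -ENODEV is returned.
 */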
int msm_bam_dmux_open(uint32_t id, void *priv,
		      void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (notify == NULL)
		return -EINVAL;

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		rc = -ENODEV;
		goto open_done;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

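/*
 * msm_bam_dmux_close() - close a mux channel from the local side
 *
 * Clears the notify callback and the local-open flag, then sends a CLOSE
 * command to the A2.
 */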
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&bam_ch[id].lock, flags);

	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}
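
/*
 * Illustrative client usage (not part of this driver): a client would
 * typically open a channel with a notify callback and then hand skbs to
 * msm_bam_dmux_write().  The channel id and callback below are
 * hypothetical.
 *
 *	static void my_notify(void *priv, int event, unsigned long data)
 *	{
 *		struct sk_buff *skb = (struct sk_buff *)data;
 *
 *		if (event == BAM_DMUX_RECEIVE || event == BAM_DMUX_WRITE_DONE)
 *			dev_kfree_skb_any(skb);
 *	}
 *
 *	rc = msm_bam_dmux_open(0, NULL, my_notify);
 *	if (!rc)
 *		rc = msm_bam_dmux_write(0, skb);
 */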
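/*
 * rx_timer_work_func() - poll the rx pipe for completed transfers
 *
 * The rx pipe is configured with SPS_O_POLL, so this work item repeatedly
 * pulls completed iovecs, matches them (in FIFO order) against bam_rx_pool
 * and processes them via handle_bam_mux_cmd(), then sleeps briefly and
 * requeues itself.
 */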
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;

	while (1) {
		sps_get_iovec(bam_rx_pipe, &iov);
		if (iov.addr == 0)
			break;
		mutex_lock(&bam_rx_pool_lock);
		node = bam_rx_pool.next;
		list_del(node);
		mutex_unlock(&bam_rx_pool_lock);
		info = container_of(node, struct rx_pkt_info, list_node);
		handle_bam_mux_cmd(&info->work);
	}

	msleep(1);
	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
}

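/*
 * bam_mux_tx_notify() - SPS callback for tx pipe EOT events
 *
 * Unmaps the finished transfer; data packets are handed to the tx
 * workqueue for client notification, command buffers are simply freed.
 */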
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd) {
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->skb->len,
					 DMA_TO_DEVICE);
			queue_work(bam_mux_tx_workqueue, &pkt->work);
		} else {
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->len,
					 DMA_TO_DEVICE);
			kfree(pkt->skb);
			kfree(pkt);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			"ch%02d local open=%s remote open=%s\n",
			j, bam_ch_is_local_open(j) ? "Y" : "N",
			bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

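/*
 * bam_init() - bring up the A2 BAM and the tx/rx pipes
 *
 * Maps the A2 BAM registers, registers the BAM with the SPS driver, sets
 * up the tx pipe (memory -> A2, EOT callback) and the rx pipe (A2 ->
 * memory, polled), primes NUM_BUFFERS rx buffers and starts the rx
 * polling work.  On any failure the previously acquired resources are
 * released in reverse order.
 */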
static void bam_init(struct work_struct *work)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int i;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_connect_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();

	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
	return;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
			  rx_desc_mem_buf.phys_base);
rx_mem_failed:
	sps_disconnect(bam_tx_pipe);
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
			  tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_mem_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	/*return ret;*/
	return;
}

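/*
 * bam_dmux_probe() - platform driver probe
 *
 * Creates the tx/rx workqueues, initializes the per-channel locks and
 * schedules the (currently time-delayed) BAM initialization.
 */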
static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
		spin_lock_init(&bam_ch[rc].lock);

	/* switch over to the A2 power status mechanism when available */
	INIT_DELAYED_WORK(&bam_init_work, bam_init);
	schedule_delayed_work(&bam_init_work, msecs_to_jiffies(40000));

	return 0;
}

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init bam_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&bam_dmux_driver);
}

module_init(bam_dmux_init);
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");