/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * BAM DMUX module.
 *
 * Multiplexes several logical data channels over a single pair of
 * BAM (Bus Access Manager) pipes shared with the A2 modem processor.
 */

#define DEBUG	/* enable the pr_debug() calls below */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
/* explicit include for the DMA mapping API used below; presumably pulled
 * in transitively by the headers above in the original tree
 */
#include <linux/dma-mapping.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA	0
#define BAM_MUX_HDR_CMD_OPEN	1
#define BAM_MUX_HDR_CMD_CLOSE	2

#define RX_STATE_HDR_QUEUED	0
#define RX_STATE_DATA_QUEUED	1

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif
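/*
 * Per-channel state. 'status' tracks the local/remote open handshake
 * (BAM_CH_LOCAL_OPEN / BAM_CH_REMOTE_OPEN), 'notify' and 'priv' are the
 * client callback registered via msm_bam_dmux_open(), and 'pdev' is the
 * platform device published when the remote side opens the channel.
 */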
struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
};
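/*
 * Bookkeeping attached to each in-flight BAM descriptor: a tx_pkt_info is
 * passed as the user pointer to sps_transfer_one() on the tx pipe
 * (is_cmd distinguishes command buffers from data skbs); an rx_pkt_info
 * tracks one queued receive buffer in bam_rx_pool.
 */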
struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};
#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct delayed_work bam_init_work;
static struct sps_bam_props a2_props;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_lock);
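/*
 * On-the-wire mux header, prepended to every packet in both directions.
 * pkt_len counts only the payload; pad_len covers the bytes added to
 * round the payload up to a 4-byte boundary.
 */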
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DEFINE_MUTEX(bam_mux_lock);
static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)
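/*
 * Allocate one BUFFER_SIZE receive skb, map it for DMA, add it to
 * bam_rx_pool, and hand the descriptor to the rx pipe.
 */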
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info)
		return; /* need better way to handle this */

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	if (!info->skb) {
		kfree(info);
		return; /* need better way to handle this */
	}
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_lock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_lock);

	/* need a way to handle error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
					   DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
			 BUFFER_SIZE, info, 0);
}
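/*
 * Strip the mux header from a received data packet and hand the skb to
 * the channel's notify callback (or drop it if no client is registered),
 * then replenish the rx queue.
 */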
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}
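/*
 * Completion handler for one received buffer: validate the mux header,
 * then dispatch on the command type (data, remote open, remote close).
 */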
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;
	int ret;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
	    rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
	    rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	/* a corrupt ch_id must not be allowed to index past bam_ch[] */
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO ||
	    rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
		if (ret)
			pr_err("%s: platform_device_add() error: %d\n",
			       __func__, ret);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}
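/*
 * Queue a command buffer (an open/close mux header with no payload) on
 * the tx pipe. The buffer is freed by bam_mux_tx_notify() once the EOT
 * event signals completion.
 */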
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;

	mutex_lock(&bam_mux_lock);
	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}
	/* not really an skb; freed with kfree() in bam_mux_tx_notify() */
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);

	mutex_unlock(&bam_mux_lock);
	return rc;
}
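/*
 * Workqueue half of tx completion for data packets: report
 * BAM_DMUX_WRITE_DONE to the client, or free the skb if the channel was
 * closed in the meantime.
 */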
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	unsigned long event_data;

	info = container_of(work, struct tx_pkt_info, work);
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->data_len);
	event_data = (unsigned long)(skb);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}
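/*
 * msm_bam_dmux_write() - queue an skb for transmission on a mux channel.
 * Prepends a bam_mux_hdr, pads the total length to a 4-byte multiple
 * (copying into a larger skb when there is no tailroom), and submits the
 * buffer to the tx pipe. On completion the skb is handed back to the
 * client through the BAM_DMUX_WRITE_DONE notify event.
 */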
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/* if the skb does not have enough tailroom for padding,
	 * copy it into a new, expanded skb
	 */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() plus memcpy() is probably more
		 * efficient
		 */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding;
	 * hdr is fine, padding is tricky
	 */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		return -ENOMEM;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	return rc;
}
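/*
 * msm_bam_dmux_open() - open a mux channel from the local side.
 * @priv is passed back to @notify, which receives BAM_DMUX_RECEIVE and
 * BAM_DMUX_WRITE_DONE events. Fails with -ENODEV until the remote side
 * has sent its OPEN command; on success an OPEN command is queued so the
 * remote end learns the channel is ready.
 */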
int msm_bam_dmux_open(uint32_t id, void *priv,
		      void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (notify == NULL)
		return -EINVAL;

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		rc = -ENODEV;
		goto open_done;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}
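/*
 * msm_bam_dmux_close() - close a mux channel from the local side:
 * clear the client callback and send a CLOSE command to the remote end.
 */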
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&bam_ch[id].lock, flags);

	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}
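/*
 * Polling loop for the rx pipe (connected with SPS_O_POLL): drain every
 * completed descriptor, process it, then re-queue ourselves after a
 * short sleep.
 */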
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;

	while (1) {
		sps_get_iovec(bam_rx_pipe, &iov);
		if (iov.addr == 0)
			break;
		mutex_lock(&bam_rx_pool_lock);
		node = bam_rx_pool.next;
		list_del(node);
		mutex_unlock(&bam_rx_pool_lock);
		info = container_of(node, struct rx_pkt_info, list_node);
		handle_bam_mux_cmd(&info->work);
	}

	msleep(1);
	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
}
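/*
 * SPS callback for tx EOT events. Data completions are bounced to
 * bam_mux_tx_workqueue so the client notify runs in process context;
 * command buffers are unmapped and freed directly.
 */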
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd) {
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->skb->len,
					 DMA_TO_DEVICE);
			queue_work(bam_mux_tx_workqueue, &pkt->work);
		} else {
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->len,
					 DMA_TO_DEVICE);
			kfree(pkt->skb);
			kfree(pkt);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}
#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, bam_ch_is_local_open(j) ? "Y" : "N",
			       bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif
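/*
 * One-time BAM bring-up, run from a delayed work: map and register the
 * A2 BAM, connect the tx (dest pipe 4) and rx (src pipe 5) pipes,
 * register the tx completion callback, prime NUM_BUFFERS receive
 * buffers, and start the rx polling work.
 */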
static void bam_init(struct work_struct *work)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int i;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_alloc_endpoint_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto rx_alloc_endpoint_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto tx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();

	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
	return;

	/*
	 * Unwind in strict reverse order of acquisition; each label falls
	 * through to the next so every earlier resource is released.
	 */
tx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
			  rx_desc_mem_buf.phys_base);
rx_mem_failed:
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
rx_alloc_endpoint_failed:
	sps_disconnect(bam_tx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
			  tx_desc_mem_buf.phys_base);
tx_mem_failed:
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_alloc_endpoint_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	iounmap(a2_virt_addr);
ioremap_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	/*return ret;*/
	return;
}
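/*
 * Platform probe: create the tx/rx workqueues, initialize per-channel
 * state and platform devices, and schedule bam_init(). The 40 second
 * delay stands in for a real A2 power status handshake (see the comment
 * below).
 */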
751static int bam_dmux_probe(struct platform_device *pdev)
752{
753 int rc;
754
755 DBG("%s probe called\n", __func__);
756 if (bam_mux_initialized)
757 return 0;
758
759 bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
760 if (!bam_mux_rx_workqueue)
761 return -ENOMEM;
762
763 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
764 if (!bam_mux_tx_workqueue) {
765 destroy_workqueue(bam_mux_rx_workqueue);
766 return -ENOMEM;
767 }
768
Jeff Hugo7960abd2011-08-02 15:39:38 -0600769 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700770 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -0600771 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
772 "bam_dmux_ch_%d", rc);
773 /* bus 2, ie a2 stream 2 */
774 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
775 if (!bam_ch[rc].pdev) {
776 pr_err("%s: platform device alloc failed\n", __func__);
777 destroy_workqueue(bam_mux_rx_workqueue);
778 destroy_workqueue(bam_mux_tx_workqueue);
779 return -ENOMEM;
780 }
781 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700782
783 /* switch over to A2 power status mechanism when avaliable */
784 INIT_DELAYED_WORK(&bam_init_work, bam_init);
785 schedule_delayed_work(&bam_init_work, msecs_to_jiffies(40000));
786
787 return 0;
788}
789
static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init bam_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", NULL);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&bam_dmux_driver);
}

module_init(bam_dmux_init);
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");