blob: 304a68714f0358d546a953a75fc82601552feb7e [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/*
15 * BAM DMUX module.
16 */
17
18#define DEBUG
19
20#include <linux/delay.h>
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/platform_device.h>
24#include <linux/sched.h>
25#include <linux/skbuff.h>
26#include <linux/debugfs.h>
Jeff Hugoaab7ebc2011-09-07 16:46:04 -060027#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
29#include <mach/sps.h>
30#include <mach/bam_dmux.h>
Jeff Hugoade1f842011-08-03 15:53:59 -060031#include <mach/msm_smsm.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33#define BAM_CH_LOCAL_OPEN 0x1
34#define BAM_CH_REMOTE_OPEN 0x2
35
36#define BAM_MUX_HDR_MAGIC_NO 0x33fc
37
38#define BAM_MUX_HDR_CMD_DATA 0
39#define BAM_MUX_HDR_CMD_OPEN 1
40#define BAM_MUX_HDR_CMD_CLOSE 2
41
Jeff Hugo949080a2011-08-30 11:58:56 -060042#define POLLING_MIN_SLEEP 950 /* 0.95 ms */
43#define POLLING_MAX_SLEEP 1050 /* 1.05 ms */
44#define POLLING_INACTIVITY 40 /* cycles before switch to intr mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045
46
47static int msm_bam_dmux_debug_enable;
48module_param_named(debug_enable, msm_bam_dmux_debug_enable,
49 int, S_IRUGO | S_IWUSR | S_IWGRP);
50
51#if defined(DEBUG)
52static uint32_t bam_dmux_read_cnt;
53static uint32_t bam_dmux_write_cnt;
54static uint32_t bam_dmux_write_cpy_cnt;
55static uint32_t bam_dmux_write_cpy_bytes;
56
57#define DBG(x...) do { \
58 if (msm_bam_dmux_debug_enable) \
59 pr_debug(x); \
60 } while (0)
61
62#define DBG_INC_READ_CNT(x) do { \
63 bam_dmux_read_cnt += (x); \
64 if (msm_bam_dmux_debug_enable) \
65 pr_debug("%s: total read bytes %u\n", \
66 __func__, bam_dmux_read_cnt); \
67 } while (0)
68
69#define DBG_INC_WRITE_CNT(x) do { \
70 bam_dmux_write_cnt += (x); \
71 if (msm_bam_dmux_debug_enable) \
72 pr_debug("%s: total written bytes %u\n", \
73 __func__, bam_dmux_write_cnt); \
74 } while (0)
75
76#define DBG_INC_WRITE_CPY(x) do { \
77 bam_dmux_write_cpy_bytes += (x); \
78 bam_dmux_write_cpy_cnt++; \
79 if (msm_bam_dmux_debug_enable) \
80 pr_debug("%s: total write copy cnt %u, bytes %u\n", \
81 __func__, bam_dmux_write_cpy_cnt, \
82 bam_dmux_write_cpy_bytes); \
83 } while (0)
84#else
85#define DBG(x...) do { } while (0)
86#define DBG_INC_READ_CNT(x...) do { } while (0)
87#define DBG_INC_WRITE_CNT(x...) do { } while (0)
88#define DBG_INC_WRITE_CPY(x...) do { } while (0)
89#endif
90
/* Per-logical-channel state for the BAM DMUX. */
struct bam_ch_info {
	uint32_t status;	/* BAM_CH_LOCAL_OPEN / BAM_CH_REMOTE_OPEN bits */
	void (*notify)(void *, int, unsigned long);	/* client event callback */
	void *priv;		/* opaque context handed back to notify() */
	spinlock_t lock;	/* protects status/notify/priv */
	struct platform_device *pdev;	/* added/removed on remote OPEN/CLOSE */
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
};
99
/* Bookkeeping for one in-flight uplink (tx) transfer. */
struct tx_pkt_info {
	struct sk_buff *skb;	/* data skb; for commands, the raw buffer cast to an skb ptr */
	dma_addr_t dma_address;	/* mapping created before sps_transfer_one() */
	char is_cmd;		/* 1 = command buffer, 0 = data skb */
	uint32_t len;		/* command buffer length (used when is_cmd) */
	struct work_struct work;	/* runs bam_mux_write_done() for data */
};
107
/* Bookkeeping for one queued downlink (rx) buffer. */
struct rx_pkt_info {
	struct sk_buff *skb;	/* BUFFER_SIZE skb handed to the rx pipe */
	dma_addr_t dma_address;	/* DMA_FROM_DEVICE mapping of skb data */
	struct work_struct work;	/* runs handle_bam_mux_cmd() */
	struct list_head list_node;	/* link in bam_rx_pool */
};
114
115#define A2_NUM_PIPES 6
116#define A2_SUMMING_THRESHOLD 4096
117#define A2_DEFAULT_DESCRIPTORS 32
118#define A2_PHYS_BASE 0x124C2000
119#define A2_PHYS_SIZE 0x2000
120#define BUFFER_SIZE 2048
121#define NUM_BUFFERS 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700122static struct sps_bam_props a2_props;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600123static u32 a2_device_handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700124static struct sps_pipe *bam_tx_pipe;
125static struct sps_pipe *bam_rx_pipe;
126static struct sps_connect tx_connection;
127static struct sps_connect rx_connection;
128static struct sps_mem_buffer tx_desc_mem_buf;
129static struct sps_mem_buffer rx_desc_mem_buf;
130static struct sps_register_event tx_register_event;
Jeff Hugo33dbc002011-08-25 15:52:53 -0600131static struct sps_register_event rx_register_event;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700132
133static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
134static int bam_mux_initialized;
135
Jeff Hugo949080a2011-08-30 11:58:56 -0600136static int polling_mode;
137
138static LIST_HEAD(bam_rx_pool);
139static DEFINE_MUTEX(bam_rx_pool_lock);
140
/* On-the-wire MUX header prepended to every packet exchanged with the A2. */
struct bam_mux_hdr {
	uint16_t magic_num;	/* must be BAM_MUX_HDR_MAGIC_NO */
	uint8_t reserved;
	uint8_t cmd;		/* BAM_MUX_HDR_CMD_{DATA,OPEN,CLOSE} */
	uint8_t pad_len;	/* bytes of padding appended after the payload */
	uint8_t ch_id;		/* logical channel index */
	uint16_t pkt_len;	/* payload length, excluding header and padding */
};
149
150static void bam_mux_write_done(struct work_struct *work);
151static void handle_bam_mux_cmd(struct work_struct *work);
Jeff Hugo949080a2011-08-30 11:58:56 -0600152static void rx_timer_work_func(struct work_struct *work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153
154static DEFINE_MUTEX(bam_mux_lock);
Jeff Hugo949080a2011-08-30 11:58:56 -0600155static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156
157static struct workqueue_struct *bam_mux_rx_workqueue;
158static struct workqueue_struct *bam_mux_tx_workqueue;
159
/* A2 power collapse */
161#define UL_TIMEOUT_DELAY 1000 /* in ms */
162static void toggle_apps_ack(void);
163static void reconnect_to_bam(void);
164static void disconnect_to_bam(void);
165static void ul_wakeup(void);
166static void ul_timeout(struct work_struct *work);
167static void vote_dfab(void);
168static void unvote_dfab(void);
169
170static int bam_is_connected;
171static DEFINE_MUTEX(wakeup_lock);
172static struct completion ul_wakeup_ack_completion;
173static struct completion bam_connection_completion;
174static struct delayed_work ul_timeout_work;
175static int ul_packet_written;
176static struct clk *dfab_clk;
177static DEFINE_RWLOCK(ul_wakeup_lock);
/* End A2 power collapse */
179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define bam_ch_is_open(x) \
181 (bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))
182
183#define bam_ch_is_local_open(x) \
184 (bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)
185
186#define bam_ch_is_remote_open(x) \
187 (bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)
188
189static void queue_rx(void)
190{
191 void *ptr;
192 struct rx_pkt_info *info;
193
194 info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
195 if (!info)
196 return; /*need better way to handle this */
197
198 INIT_WORK(&info->work, handle_bam_mux_cmd);
199
200 info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
201 ptr = skb_put(info->skb, BUFFER_SIZE);
Jeff Hugo949080a2011-08-30 11:58:56 -0600202
203 mutex_lock(&bam_rx_pool_lock);
204 list_add_tail(&info->list_node, &bam_rx_pool);
205 mutex_unlock(&bam_rx_pool_lock);
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207 /* need a way to handle error case */
208 info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
209 DMA_FROM_DEVICE);
210 sps_transfer_one(bam_rx_pipe, info->dma_address,
Jeff Hugo33dbc002011-08-25 15:52:53 -0600211 BUFFER_SIZE, info,
212 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213}
214
/*
 * Deliver a received DATA packet to the owning client.
 *
 * Strips the MUX header by advancing skb->data past it and trimming
 * tail/len to the header's pkt_len (this also drops the pad bytes).
 * The skb is handed to the channel's notify() callback with
 * BAM_DMUX_RECEIVE, or freed if no client is registered.  A replacement
 * rx buffer is queued afterwards to keep the pipe full.
 */
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	/* adjust skb geometry by hand to skip the MUX header */
	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	/* hold the channel lock so notify/priv stay consistent */
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}
241
/*
 * Process one completed rx buffer from the BAM pipe.
 *
 * Runs either as the rx_pkt_info work item or directly from the polling
 * loop in rx_timer_work_func().  Unmaps the buffer, validates the MUX
 * header magic, then dispatches on the command:
 *  - DATA:  hand the payload to the client via bam_mux_process_data()
 *  - OPEN:  mark the channel remote-open and add its platform device
 *  - CLOSE: mark remote-closed, unregister the platform device and
 *           allocate a fresh one for a future OPEN
 * Every path queues a replacement rx buffer (queue_rx()).
 */
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;
	int ret;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		/* announce the channel to clients waiting on the device */
		ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
		if (ret)
			pr_err("%s: platform_device_add() error: %d\n",
					__func__, ret);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		/* recreate the pdev so a later OPEN can re-add it */
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}
309
310static int bam_mux_write_cmd(void *data, uint32_t len)
311{
312 int rc;
313 struct tx_pkt_info *pkt;
314 dma_addr_t dma_address;
315
316 mutex_lock(&bam_mux_lock);
317 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
318 if (pkt == NULL) {
319 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
320 rc = -ENOMEM;
321 mutex_unlock(&bam_mux_lock);
322 return rc;
323 }
324
325 dma_address = dma_map_single(NULL, data, len,
326 DMA_TO_DEVICE);
327 if (!dma_address) {
328 pr_err("%s: dma_map_single() failed\n", __func__);
329 rc = -ENOMEM;
330 mutex_unlock(&bam_mux_lock);
331 return rc;
332 }
333 pkt->skb = (struct sk_buff *)(data);
334 pkt->len = len;
335 pkt->dma_address = dma_address;
336 pkt->is_cmd = 1;
337 rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
338 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
339
340 mutex_unlock(&bam_mux_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600341 ul_packet_written = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700342 return rc;
343}
344
345static void bam_mux_write_done(struct work_struct *work)
346{
347 struct sk_buff *skb;
348 struct bam_mux_hdr *hdr;
349 struct tx_pkt_info *info;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600350 unsigned long event_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351
352 info = container_of(work, struct tx_pkt_info, work);
353 skb = info->skb;
354 kfree(info);
355 hdr = (struct bam_mux_hdr *)skb->data;
356 DBG_INC_WRITE_CNT(skb->data_len);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600357 event_data = (unsigned long)(skb);
358 if (bam_ch[hdr->ch_id].notify)
359 bam_ch[hdr->ch_id].notify(
360 bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
361 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362 else
363 dev_kfree_skb_any(skb);
364}
365
366int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
367{
368 int rc = 0;
369 struct bam_mux_hdr *hdr;
370 unsigned long flags;
371 struct sk_buff *new_skb = NULL;
372 dma_addr_t dma_address;
373 struct tx_pkt_info *pkt;
374
375 if (id >= BAM_DMUX_NUM_CHANNELS)
376 return -EINVAL;
377 if (!skb)
378 return -EINVAL;
379 if (!bam_mux_initialized)
380 return -ENODEV;
381
382 DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
383 spin_lock_irqsave(&bam_ch[id].lock, flags);
384 if (!bam_ch_is_open(id)) {
385 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
386 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
387 return -ENODEV;
388 }
389 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
390
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600391 read_lock(&ul_wakeup_lock);
392 if (!bam_is_connected)
393 ul_wakeup();
394
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395 /* if skb do not have any tailroom for padding,
396 copy the skb into a new expanded skb */
397 if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
398 /* revisit, probably dev_alloc_skb and memcpy is effecient */
399 new_skb = skb_copy_expand(skb, skb_headroom(skb),
400 4 - (skb->len & 0x3), GFP_ATOMIC);
401 if (new_skb == NULL) {
402 pr_err("%s: cannot allocate skb\n", __func__);
403 return -ENOMEM;
404 }
405 dev_kfree_skb_any(skb);
406 skb = new_skb;
407 DBG_INC_WRITE_CPY(skb->len);
408 }
409
410 hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));
411
412 /* caller should allocate for hdr and padding
413 hdr is fine, padding is tricky */
414 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
415 hdr->cmd = BAM_MUX_HDR_CMD_DATA;
416 hdr->reserved = 0;
417 hdr->ch_id = id;
418 hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
419 if (skb->len & 0x3)
420 skb_put(skb, 4 - (skb->len & 0x3));
421
422 hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);
423
424 DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
425 __func__, skb->data, skb->tail, skb->len,
426 hdr->pkt_len, hdr->pad_len);
427
428 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
429 if (pkt == NULL) {
430 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
431 if (new_skb)
432 dev_kfree_skb_any(new_skb);
433 return -ENOMEM;
434 }
435
436 dma_address = dma_map_single(NULL, skb->data, skb->len,
437 DMA_TO_DEVICE);
438 if (!dma_address) {
439 pr_err("%s: dma_map_single() failed\n", __func__);
440 if (new_skb)
441 dev_kfree_skb_any(new_skb);
442 kfree(pkt);
443 return -ENOMEM;
444 }
445 pkt->skb = skb;
446 pkt->dma_address = dma_address;
447 pkt->is_cmd = 0;
448 INIT_WORK(&pkt->work, bam_mux_write_done);
449 rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
450 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600451 ul_packet_written = 1;
452 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700453 return rc;
454}
455
/*
 * Open DMUX channel @id from the local side.
 *
 * @priv:   opaque pointer passed back in every notify() invocation
 * @notify: required client callback for RECEIVE / WRITE_DONE events
 *
 * Fails with -ENODEV if the remote side has not opened the channel yet.
 * On success an OPEN command is sent to the remote processor.
 * Returns 0 on success or a negative errno.
 */
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (notify == NULL)
		return -EINVAL;

	/* allocate the OPEN command before taking the channel lock */
	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		rc = -ENODEV;
		goto open_done;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/*
	 * wake the A2 before sending the OPEN command.
	 * NOTE(review): ul_wakeup() takes a mutex while ul_wakeup_lock
	 * (an rwlock) is read-held — confirm this cannot sleep in a
	 * context where that matters.
	 */
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected)
		ul_wakeup();

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}
514
515int msm_bam_dmux_close(uint32_t id)
516{
517 struct bam_mux_hdr *hdr;
518 unsigned long flags;
519 int rc;
520
521 if (id >= BAM_DMUX_NUM_CHANNELS)
522 return -EINVAL;
523 DBG("%s: closing ch %d\n", __func__, id);
524 if (!bam_mux_initialized)
525 return -ENODEV;
526 spin_lock_irqsave(&bam_ch[id].lock, flags);
527
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600528 read_lock(&ul_wakeup_lock);
529 if (!bam_is_connected)
530 ul_wakeup();
531
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600532 bam_ch[id].notify = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533 bam_ch[id].priv = NULL;
534 bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
535 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
536
537 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
538 if (hdr == NULL) {
539 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
540 return -ENOMEM;
541 }
542 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
543 hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
544 hdr->reserved = 0;
545 hdr->ch_id = id;
546 hdr->pkt_len = 0;
547 hdr->pad_len = 0;
548
549 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600550 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551
552 DBG("%s: closed ch %d\n", __func__, id);
553 return rc;
554}
555
/*
 * Polling-mode rx loop, run on bam_mux_rx_workqueue.
 *
 * Entered (via rx_timer_work) when bam_mux_rx_notify() switches the rx
 * pipe to polling.  Drains completed descriptors with sps_get_iovec(),
 * handing each to handle_bam_mux_cmd().  After POLLING_INACTIVITY idle
 * cycles it re-enables interrupts on the pipe and exits; a final
 * sps_get_iovec() closes the window where a packet completed between
 * draining and re-enabling interrupts.
 */
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	struct sps_connect cur_rx_conn;

	while (1) { /* timer loop */
		++inactive_cycles;
		while (1) { /* deplete queue loop */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			/* buffers complete in FIFO order: pop the head */
			mutex_lock(&bam_rx_pool_lock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_lock);
			info = container_of(node, struct rx_pkt_info,
							list_node);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			/*
			 * attempt to enable interrupts in this pipe
			 * if enabling interrupts fails, continue polling
			 */
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed, interrupts"
						" not enabled\n", __func__);
				queue_work(bam_mux_rx_workqueue,
						&rx_timer_work);
				return;
			} else {
				rx_register_event.options = SPS_O_EOT;
				/* should check return value */
				sps_register_event(bam_rx_pipe,
							&rx_register_event);
				cur_rx_conn.options = SPS_O_AUTO_ENABLE |
					SPS_O_EOT | SPS_O_ACK_TRANSFERS;
				ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
				if (ret) {
					pr_err("%s: sps_set_config() failed, "
						"interrupts not enabled\n",
						__func__);
					queue_work(bam_mux_rx_workqueue,
							&rx_timer_work);
					return;
				}
				polling_mode = 0;
			}
			/* handle race condition - missed packet? */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				return;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_lock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_lock);
			info = container_of(node, struct rx_pkt_info,
							list_node);
			handle_bam_mux_cmd(&info->work);
			return;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}
629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630static void bam_mux_tx_notify(struct sps_event_notify *notify)
631{
632 struct tx_pkt_info *pkt;
633
634 DBG("%s: event %d notified\n", __func__, notify->event_id);
635
636 switch (notify->event_id) {
637 case SPS_EVENT_EOT:
638 pkt = notify->data.transfer.user;
639 if (!pkt->is_cmd) {
640 dma_unmap_single(NULL, pkt->dma_address,
641 pkt->skb->len,
642 DMA_TO_DEVICE);
643 queue_work(bam_mux_tx_workqueue, &pkt->work);
644 } else {
645 dma_unmap_single(NULL, pkt->dma_address,
646 pkt->len,
647 DMA_TO_DEVICE);
648 kfree(pkt->skb);
649 kfree(pkt);
650 }
651 break;
652 default:
653 pr_err("%s: recieved unexpected event id %d\n", __func__,
654 notify->event_id);
655 }
656}
657
Jeff Hugo33dbc002011-08-25 15:52:53 -0600658static void bam_mux_rx_notify(struct sps_event_notify *notify)
659{
Jeff Hugo949080a2011-08-30 11:58:56 -0600660 int ret;
661 struct sps_connect cur_rx_conn;
Jeff Hugo33dbc002011-08-25 15:52:53 -0600662
663 DBG("%s: event %d notified\n", __func__, notify->event_id);
664
Jeff Hugo33dbc002011-08-25 15:52:53 -0600665 switch (notify->event_id) {
666 case SPS_EVENT_EOT:
Jeff Hugo949080a2011-08-30 11:58:56 -0600667 /* attempt to disable interrupts in this pipe */
668 if (!polling_mode) {
669 ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
670 if (ret) {
671 pr_err("%s: sps_get_config() failed, interrupts"
672 " not disabled\n", __func__);
673 break;
674 }
675 rx_register_event.options = 0;
676 ret = sps_register_event(bam_rx_pipe,
677 &rx_register_event);
678 if (ret) {
679 pr_err("%s: sps_register_event ret = %d\n",
680 __func__, ret);
681 break;
682 }
683 cur_rx_conn.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
684 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
685 ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
686 if (ret) {
687 pr_err("%s: sps_set_config() failed, interrupts"
688 " not disabled\n", __func__);
689 break;
690 }
691 polling_mode = 1;
692 queue_work(bam_mux_rx_workqueue, &rx_timer_work);
693 }
Jeff Hugo33dbc002011-08-25 15:52:53 -0600694 break;
695 default:
696 pr_err("%s: recieved unexpected event id %d\n", __func__,
697 notify->event_id);
698 }
699}
700
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700701#ifdef CONFIG_DEBUG_FS
702
703static int debug_tbl(char *buf, int max)
704{
705 int i = 0;
706 int j;
707
708 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
709 i += scnprintf(buf + i, max - i,
710 "ch%02d local open=%s remote open=%s\n",
711 j, bam_ch_is_local_open(j) ? "Y" : "N",
712 bam_ch_is_remote_open(j) ? "Y" : "N");
713 }
714
715 return i;
716}
717
718#define DEBUG_BUFMAX 4096
719static char debug_buffer[DEBUG_BUFMAX];
720
721static ssize_t debug_read(struct file *file, char __user *buf,
722 size_t count, loff_t *ppos)
723{
724 int (*fill)(char *buf, int max) = file->private_data;
725 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
726 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
727}
728
/* debugfs open: stash the fill callback (i_private) for debug_read(). */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
734
735
/* File operations shared by all bam_dmux debugfs entries. */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
740
/*
 * Create one debugfs file; @fill is stored as i_private and invoked by
 * debug_read() to produce the file contents on each read.
 */
static void debug_create(const char *name, mode_t mode,
				struct dentry *dent,
				int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
747
748#endif
749
/*
 * Uplink inactivity timer (delayed work).
 *
 * If anything was transmitted since the last tick, re-arm and keep the
 * A2 awake; otherwise drop the SMSM power-control vote and mark the
 * bam disconnected so the next write triggers ul_wakeup().
 */
static void ul_timeout(struct work_struct *work)
{
	/* writer lock excludes all uplink senders during the state change */
	write_lock(&ul_wakeup_lock);
	if (ul_packet_written) {
		ul_packet_written = 0;
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
	} else {
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
		bam_is_connected = 0;
	}
	write_unlock(&ul_wakeup_lock);
}
/*
 * Wake the A2 for uplink traffic.
 *
 * Serialized by wakeup_lock; rechecks bam_is_connected under the lock
 * since another sender may have completed the wakeup first.  Asserts
 * the SMSM power-control bit, then waits (up to 1s each) for the A2's
 * ack and for the bam connection to come back up.
 *
 * NOTE(review): the wait_for_completion_interruptible_timeout() return
 * values are ignored — the code proceeds as connected even on timeout
 * or signal; confirm this is intentional.
 */
static void ul_wakeup(void)
{
	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		mutex_unlock(&wakeup_lock);
		return;
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	wait_for_completion_interruptible_timeout(&ul_wakeup_ack_completion,
							HZ);
	wait_for_completion_interruptible_timeout(&bam_connection_completion,
							HZ);

	bam_is_connected = 1;
	schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}
782
/*
 * Re-establish the bam connection after an A2 power collapse.
 *
 * Ordering matters: vote the dfab clock, reset the bam device,
 * reconnect both pipes, re-register the event callbacks, refill the rx
 * pool, ack the A2's power-control request and finally release anyone
 * blocked in ul_wakeup() via bam_connection_completion.  Errors are
 * logged but not propagated.
 */
static void reconnect_to_bam(void)
{
	int i;

	vote_dfab();
	i = sps_device_reset(a2_device_handle);
	if (i)
		pr_err("%s: device reset failed rc = %d\n", __func__, i);
	i = sps_connect(bam_tx_pipe, &tx_connection);
	if (i)
		pr_err("%s: tx connection failed rc = %d\n", __func__, i);
	i = sps_connect(bam_rx_pipe, &rx_connection);
	if (i)
		pr_err("%s: rx connection failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (i)
		pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (i)
		pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();
	toggle_apps_ack();
	complete_all(&bam_connection_completion);
}
808
/*
 * Tear down the bam connection for an A2 power collapse.
 *
 * Resets bam_connection_completion so ul_wakeup() blocks until
 * reconnect, disconnects both pipes, drops the dfab clock vote, zeroes
 * the descriptor FIFOs and frees every buffer still in bam_rx_pool
 * (unmapping the DMA mapping made in queue_rx()).
 */
static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;

	INIT_COMPLETION(bam_connection_completion);
	sps_disconnect(bam_tx_pipe);
	sps_disconnect(bam_rx_pipe);
	unvote_dfab();
	__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
	__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
}
830
831static void vote_dfab(void)
832{
833 int rc;
834
835 rc = clk_enable(dfab_clk);
836 if (rc)
837 pr_err("bam_dmux vote for dfab failed rc = %d\n", rc);
838}
839
/* Release the dfab clock vote taken by vote_dfab(). */
static void unvote_dfab(void)
{
	clk_disable(dfab_clk);
}
844
Jeff Hugoade1f842011-08-03 15:53:59 -0600845static void bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846{
847 u32 h;
848 dma_addr_t dma_addr;
849 int ret;
850 void *a2_virt_addr;
851 int i;
852
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600853 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 /* init BAM */
855 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
856 if (!a2_virt_addr) {
857 pr_err("%s: ioremap failed\n", __func__);
858 ret = -ENOMEM;
859 goto register_bam_failed;
860 }
861 a2_props.phys_addr = A2_PHYS_BASE;
862 a2_props.virt_addr = a2_virt_addr;
863 a2_props.virt_size = A2_PHYS_SIZE;
864 a2_props.irq = A2_BAM_IRQ;
865 a2_props.num_pipes = A2_NUM_PIPES;
866 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
867 /* need to free on tear down */
868 ret = sps_register_bam_device(&a2_props, &h);
869 if (ret < 0) {
870 pr_err("%s: register bam error %d\n", __func__, ret);
871 goto register_bam_failed;
872 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600873 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700874
875 bam_tx_pipe = sps_alloc_endpoint();
876 if (bam_tx_pipe == NULL) {
877 pr_err("%s: tx alloc endpoint failed\n", __func__);
878 ret = -ENOMEM;
879 goto register_bam_failed;
880 }
881 ret = sps_get_config(bam_tx_pipe, &tx_connection);
882 if (ret) {
883 pr_err("%s: tx get config failed %d\n", __func__, ret);
884 goto tx_get_config_failed;
885 }
886
887 tx_connection.source = SPS_DEV_HANDLE_MEM;
888 tx_connection.src_pipe_index = 0;
889 tx_connection.destination = h;
890 tx_connection.dest_pipe_index = 4;
891 tx_connection.mode = SPS_MODE_DEST;
892 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
893 tx_desc_mem_buf.size = 0x800; /* 2k */
894 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
895 &dma_addr, 0);
896 if (tx_desc_mem_buf.base == NULL) {
897 pr_err("%s: tx memory alloc failed\n", __func__);
898 ret = -ENOMEM;
899 goto tx_mem_failed;
900 }
901 tx_desc_mem_buf.phys_base = dma_addr;
902 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
903 tx_connection.desc = tx_desc_mem_buf;
904 tx_connection.event_thresh = 0x10;
905
906 ret = sps_connect(bam_tx_pipe, &tx_connection);
907 if (ret < 0) {
908 pr_err("%s: tx connect error %d\n", __func__, ret);
909 goto tx_connect_failed;
910 }
911
912 bam_rx_pipe = sps_alloc_endpoint();
913 if (bam_rx_pipe == NULL) {
914 pr_err("%s: rx alloc endpoint failed\n", __func__);
915 ret = -ENOMEM;
916 goto tx_connect_failed;
917 }
918 ret = sps_get_config(bam_rx_pipe, &rx_connection);
919 if (ret) {
920 pr_err("%s: rx get config failed %d\n", __func__, ret);
921 goto rx_get_config_failed;
922 }
923
924 rx_connection.source = h;
925 rx_connection.src_pipe_index = 5;
926 rx_connection.destination = SPS_DEV_HANDLE_MEM;
927 rx_connection.dest_pipe_index = 1;
928 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -0600929 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
930 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700931 rx_desc_mem_buf.size = 0x800; /* 2k */
932 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
933 &dma_addr, 0);
934 if (rx_desc_mem_buf.base == NULL) {
935 pr_err("%s: rx memory alloc failed\n", __func__);
936 ret = -ENOMEM;
937 goto rx_mem_failed;
938 }
939 rx_desc_mem_buf.phys_base = dma_addr;
940 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
941 rx_connection.desc = rx_desc_mem_buf;
942 rx_connection.event_thresh = 0x10;
943
944 ret = sps_connect(bam_rx_pipe, &rx_connection);
945 if (ret < 0) {
946 pr_err("%s: rx connect error %d\n", __func__, ret);
947 goto rx_connect_failed;
948 }
949
950 tx_register_event.options = SPS_O_EOT;
951 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
952 tx_register_event.xfer_done = NULL;
953 tx_register_event.callback = bam_mux_tx_notify;
954 tx_register_event.user = NULL;
955 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
956 if (ret < 0) {
957 pr_err("%s: tx register event error %d\n", __func__, ret);
958 goto rx_event_reg_failed;
959 }
960
Jeff Hugo33dbc002011-08-25 15:52:53 -0600961 rx_register_event.options = SPS_O_EOT;
962 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
963 rx_register_event.xfer_done = NULL;
964 rx_register_event.callback = bam_mux_rx_notify;
965 rx_register_event.user = NULL;
966 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
967 if (ret < 0) {
968 pr_err("%s: tx register event error %d\n", __func__, ret);
969 goto rx_event_reg_failed;
970 }
971
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700972 bam_mux_initialized = 1;
973 for (i = 0; i < NUM_BUFFERS; ++i)
974 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600975 toggle_apps_ack();
976 complete_all(&bam_connection_completion);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700977 return;
978
979rx_event_reg_failed:
980 sps_disconnect(bam_rx_pipe);
981rx_connect_failed:
982 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
983 rx_desc_mem_buf.phys_base);
984rx_mem_failed:
985 sps_disconnect(bam_tx_pipe);
986rx_get_config_failed:
987 sps_free_endpoint(bam_rx_pipe);
988tx_connect_failed:
989 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
990 tx_desc_mem_buf.phys_base);
991tx_get_config_failed:
992 sps_free_endpoint(bam_tx_pipe);
993tx_mem_failed:
994 sps_deregister_bam_device(h);
995register_bam_failed:
996 /*destroy_workqueue(bam_mux_workqueue);*/
997 /*return ret;*/
998 return;
999}
Jeff Hugoade1f842011-08-03 15:53:59 -06001000
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001001static void toggle_apps_ack(void)
1002{
1003 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
1004 smsm_change_state(SMSM_APPS_STATE,
1005 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
1006 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
1007 clear_bit = ~clear_bit;
1008}
1009
Jeff Hugoade1f842011-08-03 15:53:59 -06001010static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
1011{
1012 DBG("%s: smsm activity\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001013 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL)
1014 reconnect_to_bam();
1015 else if (bam_mux_initialized && !(new_state & SMSM_A2_POWER_CONTROL))
1016 disconnect_to_bam();
Jeff Hugoade1f842011-08-03 15:53:59 -06001017 else if (new_state & SMSM_A2_POWER_CONTROL)
1018 bam_init();
1019 else
1020 pr_err("%s: unsupported state change\n", __func__);
1021
1022}
1023
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001024static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
1025 uint32_t new_state)
1026{
1027 complete_all(&ul_wakeup_ack_completion);
1028}
1029
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001030static int bam_dmux_probe(struct platform_device *pdev)
1031{
1032 int rc;
1033
1034 DBG("%s probe called\n", __func__);
1035 if (bam_mux_initialized)
1036 return 0;
1037
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001038 dfab_clk = clk_get(&pdev->dev, "dfab_clk");
1039 if (IS_ERR(dfab_clk)) {
1040 pr_err("%s: did not get dfab clock\n", __func__);
1041 return -EFAULT;
1042 }
1043
1044 rc = clk_set_rate(dfab_clk, 64000000);
1045 if (rc)
1046 pr_err("%s: unable to set dfab clock rate\n", __func__);
1047
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001048 bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
1049 if (!bam_mux_rx_workqueue)
1050 return -ENOMEM;
1051
1052 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
1053 if (!bam_mux_tx_workqueue) {
1054 destroy_workqueue(bam_mux_rx_workqueue);
1055 return -ENOMEM;
1056 }
1057
Jeff Hugo7960abd2011-08-02 15:39:38 -06001058 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001059 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06001060 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
1061 "bam_dmux_ch_%d", rc);
1062 /* bus 2, ie a2 stream 2 */
1063 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
1064 if (!bam_ch[rc].pdev) {
1065 pr_err("%s: platform device alloc failed\n", __func__);
1066 destroy_workqueue(bam_mux_rx_workqueue);
1067 destroy_workqueue(bam_mux_tx_workqueue);
1068 return -ENOMEM;
1069 }
1070 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001071
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001072 init_completion(&ul_wakeup_ack_completion);
1073 init_completion(&bam_connection_completion);
1074 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
1075
Jeff Hugoade1f842011-08-03 15:53:59 -06001076 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
1077 bam_dmux_smsm_cb, NULL);
1078
1079 if (rc) {
1080 destroy_workqueue(bam_mux_rx_workqueue);
1081 destroy_workqueue(bam_mux_tx_workqueue);
1082 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
1083 return -ENOMEM;
1084 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001085
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001086 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
1087 bam_dmux_smsm_ack_cb, NULL);
1088
1089 if (rc) {
1090 destroy_workqueue(bam_mux_rx_workqueue);
1091 destroy_workqueue(bam_mux_tx_workqueue);
1092 smsm_state_cb_deregister(SMSM_MODEM_STATE,
1093 SMSM_A2_POWER_CONTROL,
1094 bam_dmux_smsm_cb, NULL);
1095 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
1096 rc);
1097 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
1098 platform_device_put(bam_ch[rc].pdev);
1099 return -ENOMEM;
1100 }
1101
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001102 return 0;
1103}
1104
1105static struct platform_driver bam_dmux_driver = {
1106 .probe = bam_dmux_probe,
1107 .driver = {
1108 .name = "BAM_RMNT",
1109 .owner = THIS_MODULE,
1110 },
1111};
1112
1113static int __init bam_dmux_init(void)
1114{
1115#ifdef CONFIG_DEBUG_FS
1116 struct dentry *dent;
1117
1118 dent = debugfs_create_dir("bam_dmux", 0);
1119 if (!IS_ERR(dent))
1120 debug_create("tbl", 0444, dent, debug_tbl);
1121#endif
1122 return platform_driver_register(&bam_dmux_driver);
1123}
1124
Jeff Hugoade1f842011-08-03 15:53:59 -06001125late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001126MODULE_DESCRIPTION("MSM BAM DMUX");
1127MODULE_LICENSE("GPL v2");