blob: 59503e150a521a41d4c619b44a2900adba257888 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/*
15 * BAM DMUX module.
16 */
17
18#define DEBUG
19
20#include <linux/delay.h>
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/platform_device.h>
24#include <linux/sched.h>
25#include <linux/skbuff.h>
26#include <linux/debugfs.h>
Jeff Hugoaab7ebc2011-09-07 16:46:04 -060027#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
29#include <mach/sps.h>
30#include <mach/bam_dmux.h>
Jeff Hugoade1f842011-08-03 15:53:59 -060031#include <mach/msm_smsm.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33#define BAM_CH_LOCAL_OPEN 0x1
34#define BAM_CH_REMOTE_OPEN 0x2
35
36#define BAM_MUX_HDR_MAGIC_NO 0x33fc
37
38#define BAM_MUX_HDR_CMD_DATA 0
39#define BAM_MUX_HDR_CMD_OPEN 1
40#define BAM_MUX_HDR_CMD_CLOSE 2
41
Jeff Hugo949080a2011-08-30 11:58:56 -060042#define POLLING_MIN_SLEEP 950 /* 0.95 ms */
43#define POLLING_MAX_SLEEP 1050 /* 1.05 ms */
44#define POLLING_INACTIVITY 40 /* cycles before switch to intr mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045
46
47static int msm_bam_dmux_debug_enable;
48module_param_named(debug_enable, msm_bam_dmux_debug_enable,
49 int, S_IRUGO | S_IWUSR | S_IWGRP);
50
51#if defined(DEBUG)
52static uint32_t bam_dmux_read_cnt;
53static uint32_t bam_dmux_write_cnt;
54static uint32_t bam_dmux_write_cpy_cnt;
55static uint32_t bam_dmux_write_cpy_bytes;
56
57#define DBG(x...) do { \
58 if (msm_bam_dmux_debug_enable) \
59 pr_debug(x); \
60 } while (0)
61
62#define DBG_INC_READ_CNT(x) do { \
63 bam_dmux_read_cnt += (x); \
64 if (msm_bam_dmux_debug_enable) \
65 pr_debug("%s: total read bytes %u\n", \
66 __func__, bam_dmux_read_cnt); \
67 } while (0)
68
69#define DBG_INC_WRITE_CNT(x) do { \
70 bam_dmux_write_cnt += (x); \
71 if (msm_bam_dmux_debug_enable) \
72 pr_debug("%s: total written bytes %u\n", \
73 __func__, bam_dmux_write_cnt); \
74 } while (0)
75
76#define DBG_INC_WRITE_CPY(x) do { \
77 bam_dmux_write_cpy_bytes += (x); \
78 bam_dmux_write_cpy_cnt++; \
79 if (msm_bam_dmux_debug_enable) \
80 pr_debug("%s: total write copy cnt %u, bytes %u\n", \
81 __func__, bam_dmux_write_cpy_cnt, \
82 bam_dmux_write_cpy_bytes); \
83 } while (0)
84#else
85#define DBG(x...) do { } while (0)
86#define DBG_INC_READ_CNT(x...) do { } while (0)
87#define DBG_INC_WRITE_CNT(x...) do { } while (0)
88#define DBG_INC_WRITE_CPY(x...) do { } while (0)
89#endif
90
/* Per-logical-channel state for the DMUX. */
struct bam_ch_info {
	uint32_t status;	/* BAM_CH_LOCAL_OPEN / BAM_CH_REMOTE_OPEN bits */
	void (*notify)(void *, int, unsigned long);	/* client event callback */
	void *priv;	/* opaque cookie handed back to notify() */
	spinlock_t lock;	/* protects status/notify/priv */
	struct platform_device *pdev;	/* added when the remote side opens */
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
};
99
/* Bookkeeping for one in-flight TX descriptor (data skb or command). */
struct tx_pkt_info {
	struct sk_buff *skb;	/* data skb, or the raw cmd buffer if is_cmd */
	dma_addr_t dma_address;	/* mapped DMA address of the payload */
	char is_cmd;	/* nonzero: command buffer, not a real skb */
	uint32_t len;	/* payload length (used for cmd unmap) */
	struct work_struct work;	/* runs bam_mux_write_done() for data */
};
107
/* Bookkeeping for one RX buffer submitted to the BAM RX pipe. */
struct rx_pkt_info {
	struct sk_buff *skb;	/* receive buffer (BUFFER_SIZE bytes) */
	dma_addr_t dma_address;	/* mapped DMA address of skb data */
	struct work_struct work;	/* runs handle_bam_mux_cmd() */
	struct list_head list_node;	/* entry on bam_rx_pool */
};
114
115#define A2_NUM_PIPES 6
116#define A2_SUMMING_THRESHOLD 4096
117#define A2_DEFAULT_DESCRIPTORS 32
118#define A2_PHYS_BASE 0x124C2000
119#define A2_PHYS_SIZE 0x2000
120#define BUFFER_SIZE 2048
121#define NUM_BUFFERS 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700122static struct sps_bam_props a2_props;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600123static u32 a2_device_handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700124static struct sps_pipe *bam_tx_pipe;
125static struct sps_pipe *bam_rx_pipe;
126static struct sps_connect tx_connection;
127static struct sps_connect rx_connection;
128static struct sps_mem_buffer tx_desc_mem_buf;
129static struct sps_mem_buffer rx_desc_mem_buf;
130static struct sps_register_event tx_register_event;
Jeff Hugo33dbc002011-08-25 15:52:53 -0600131static struct sps_register_event rx_register_event;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700132
133static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
134static int bam_mux_initialized;
135
Jeff Hugo949080a2011-08-30 11:58:56 -0600136static int polling_mode;
137
138static LIST_HEAD(bam_rx_pool);
139static DEFINE_MUTEX(bam_rx_pool_lock);
140
/* On-the-wire MUX header prepended to every packet exchanged with A2. */
struct bam_mux_hdr {
	uint16_t magic_num;	/* must be BAM_MUX_HDR_MAGIC_NO */
	uint8_t reserved;
	uint8_t cmd;	/* BAM_MUX_HDR_CMD_DATA / _OPEN / _CLOSE */
	uint8_t pad_len;	/* trailing pad bytes added for 4-byte alignment */
	uint8_t ch_id;	/* logical channel number */
	uint16_t pkt_len;	/* payload length, excluding header and padding */
};
149
150static void bam_mux_write_done(struct work_struct *work);
151static void handle_bam_mux_cmd(struct work_struct *work);
Jeff Hugo949080a2011-08-30 11:58:56 -0600152static void rx_timer_work_func(struct work_struct *work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153
154static DEFINE_MUTEX(bam_mux_lock);
Jeff Hugo949080a2011-08-30 11:58:56 -0600155static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156
157static struct workqueue_struct *bam_mux_rx_workqueue;
158static struct workqueue_struct *bam_mux_tx_workqueue;
159
/* A2 power collapse */
161#define UL_TIMEOUT_DELAY 1000 /* in ms */
162static void toggle_apps_ack(void);
163static void reconnect_to_bam(void);
164static void disconnect_to_bam(void);
165static void ul_wakeup(void);
166static void ul_timeout(struct work_struct *work);
167static void vote_dfab(void);
168static void unvote_dfab(void);
169
170static int bam_is_connected;
171static DEFINE_MUTEX(wakeup_lock);
172static struct completion ul_wakeup_ack_completion;
173static struct completion bam_connection_completion;
174static struct delayed_work ul_timeout_work;
175static int ul_packet_written;
176static struct clk *dfab_clk;
177static DEFINE_RWLOCK(ul_wakeup_lock);
/* End A2 power collapse */
179
/* Channel is usable only when both local and remote sides have opened it. */
#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

/* Apps-side open flag only. */
#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

/* Remote (A2) open flag only. */
#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)
188
189static void queue_rx(void)
190{
191 void *ptr;
192 struct rx_pkt_info *info;
193
194 info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
195 if (!info)
196 return; /*need better way to handle this */
197
198 INIT_WORK(&info->work, handle_bam_mux_cmd);
199
200 info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
201 ptr = skb_put(info->skb, BUFFER_SIZE);
Jeff Hugo949080a2011-08-30 11:58:56 -0600202
203 mutex_lock(&bam_rx_pool_lock);
204 list_add_tail(&info->list_node, &bam_rx_pool);
205 mutex_unlock(&bam_rx_pool_lock);
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207 /* need a way to handle error case */
208 info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
209 DMA_FROM_DEVICE);
210 sps_transfer_one(bam_rx_pipe, info->dma_address,
Jeff Hugo33dbc002011-08-25 15:52:53 -0600211 BUFFER_SIZE, info,
212 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213}
214
/*
 * bam_mux_process_data() - deliver one received DATA packet to its channel.
 * @rx_skb: buffer whose data currently starts at the bam_mux_hdr.
 *
 * Manually strips the MUX header by rewriting the skb's data/tail/len
 * fields in place (rather than skb_pull/skb_trim), so the client sees only
 * the payload.  If the channel has a notify callback the skb is handed to
 * it (ownership transfers to the client); otherwise it is freed.  A
 * replacement RX buffer is queued either way.
 *
 * NOTE(review): ch_id is not range-checked here; presumably the caller's
 * header validation is trusted -- confirm ch_id < BAM_DMUX_NUM_CHANNELS.
 */
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	/* advance past the header and shrink the skb to the payload */
	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	/* lock guards the notify/priv pair against concurrent open/close */
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	/* replace the buffer we just consumed */
	queue_rx();
}
241
/*
 * handle_bam_mux_cmd() - process one completed RX buffer.
 * @work: embedded in the rx_pkt_info that tracked the buffer.
 *
 * Unmaps the DMA buffer, frees the tracking info, validates the MUX header
 * magic, then dispatches on the command: DATA packets go to
 * bam_mux_process_data(); OPEN marks the channel remote-open and registers
 * its platform device so clients can probe; CLOSE clears remote-open and
 * re-creates the platform device for a future open.  Invalid packets are
 * dropped.  Every path queues a replacement RX buffer (DATA does so inside
 * bam_mux_process_data()).
 *
 * Runs either from the RX workqueue (interrupt mode) or called directly by
 * rx_timer_work_func() (polling mode).
 *
 * NOTE(review): rx_hdr->ch_id indexes bam_ch[] without a range check --
 * confirm the remote side is trusted to send valid channel ids.
 */
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;
	int ret;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		/* expose the channel so its client driver can probe */
		ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
		if (ret)
			pr_err("%s: platform_device_add() error: %d\n",
					__func__, ret);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		/* recycle the platform device for the next OPEN */
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}
309
310static int bam_mux_write_cmd(void *data, uint32_t len)
311{
312 int rc;
313 struct tx_pkt_info *pkt;
314 dma_addr_t dma_address;
315
316 mutex_lock(&bam_mux_lock);
317 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
318 if (pkt == NULL) {
319 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
320 rc = -ENOMEM;
321 mutex_unlock(&bam_mux_lock);
322 return rc;
323 }
324
325 dma_address = dma_map_single(NULL, data, len,
326 DMA_TO_DEVICE);
327 if (!dma_address) {
328 pr_err("%s: dma_map_single() failed\n", __func__);
329 rc = -ENOMEM;
330 mutex_unlock(&bam_mux_lock);
331 return rc;
332 }
333 pkt->skb = (struct sk_buff *)(data);
334 pkt->len = len;
335 pkt->dma_address = dma_address;
336 pkt->is_cmd = 1;
337 rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
338 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
339
340 mutex_unlock(&bam_mux_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600341 ul_packet_written = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700342 return rc;
343}
344
345static void bam_mux_write_done(struct work_struct *work)
346{
347 struct sk_buff *skb;
348 struct bam_mux_hdr *hdr;
349 struct tx_pkt_info *info;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600350 unsigned long event_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351
352 info = container_of(work, struct tx_pkt_info, work);
353 skb = info->skb;
354 kfree(info);
355 hdr = (struct bam_mux_hdr *)skb->data;
356 DBG_INC_WRITE_CNT(skb->data_len);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600357 event_data = (unsigned long)(skb);
358 if (bam_ch[hdr->ch_id].notify)
359 bam_ch[hdr->ch_id].notify(
360 bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
361 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362 else
363 dev_kfree_skb_any(skb);
364}
365
366int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
367{
368 int rc = 0;
369 struct bam_mux_hdr *hdr;
370 unsigned long flags;
371 struct sk_buff *new_skb = NULL;
372 dma_addr_t dma_address;
373 struct tx_pkt_info *pkt;
374
375 if (id >= BAM_DMUX_NUM_CHANNELS)
376 return -EINVAL;
377 if (!skb)
378 return -EINVAL;
379 if (!bam_mux_initialized)
380 return -ENODEV;
381
382 DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
383 spin_lock_irqsave(&bam_ch[id].lock, flags);
384 if (!bam_ch_is_open(id)) {
385 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
386 pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
387 return -ENODEV;
388 }
389 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
390
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600391 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600392 if (!bam_is_connected) {
393 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600394 ul_wakeup();
Jeff Hugo061ce672011-10-21 17:15:32 -0600395 read_lock(&ul_wakeup_lock);
396 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398 /* if skb do not have any tailroom for padding,
399 copy the skb into a new expanded skb */
400 if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
401 /* revisit, probably dev_alloc_skb and memcpy is effecient */
402 new_skb = skb_copy_expand(skb, skb_headroom(skb),
403 4 - (skb->len & 0x3), GFP_ATOMIC);
404 if (new_skb == NULL) {
405 pr_err("%s: cannot allocate skb\n", __func__);
406 return -ENOMEM;
407 }
408 dev_kfree_skb_any(skb);
409 skb = new_skb;
410 DBG_INC_WRITE_CPY(skb->len);
411 }
412
413 hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));
414
415 /* caller should allocate for hdr and padding
416 hdr is fine, padding is tricky */
417 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
418 hdr->cmd = BAM_MUX_HDR_CMD_DATA;
419 hdr->reserved = 0;
420 hdr->ch_id = id;
421 hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
422 if (skb->len & 0x3)
423 skb_put(skb, 4 - (skb->len & 0x3));
424
425 hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);
426
427 DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
428 __func__, skb->data, skb->tail, skb->len,
429 hdr->pkt_len, hdr->pad_len);
430
431 pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
432 if (pkt == NULL) {
433 pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
434 if (new_skb)
435 dev_kfree_skb_any(new_skb);
436 return -ENOMEM;
437 }
438
439 dma_address = dma_map_single(NULL, skb->data, skb->len,
440 DMA_TO_DEVICE);
441 if (!dma_address) {
442 pr_err("%s: dma_map_single() failed\n", __func__);
443 if (new_skb)
444 dev_kfree_skb_any(new_skb);
445 kfree(pkt);
446 return -ENOMEM;
447 }
448 pkt->skb = skb;
449 pkt->dma_address = dma_address;
450 pkt->is_cmd = 0;
451 INIT_WORK(&pkt->work, bam_mux_write_done);
452 rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
453 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600454 ul_packet_written = 1;
455 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700456 return rc;
457}
458
/*
 * msm_bam_dmux_open() - open a DMUX logical channel from the apps side.
 * @id:     channel number (< BAM_DMUX_NUM_CHANNELS)
 * @priv:   opaque cookie handed back to @notify
 * @notify: required client callback for RECEIVE / WRITE_DONE events
 *
 * Marks the channel locally open (the remote side must already have sent
 * OPEN) and sends an OPEN command to A2.  Returns 0 on success (including
 * the already-open case), or -EINVAL/-ENODEV/-ENOMEM.
 *
 * NOTE(review): if bam_mux_write_cmd() fails, @hdr is leaked -- on success
 * it is freed by the TX-complete path; confirm intended ownership.
 * NOTE(review): bam_mux_write_cmd() acquires a mutex while ul_wakeup_lock
 * (an rwlock) is read-held, i.e. may sleep in atomic context -- confirm.
 */
int msm_bam_dmux_open(uint32_t id, void *priv,
		       void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (notify == NULL)
		return -EINVAL;

	/* allocate the OPEN command before touching channel state */
	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		rc = -ENODEV;
		goto open_done;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/* make sure the A2 link is awake before sending the command */
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}
520
521int msm_bam_dmux_close(uint32_t id)
522{
523 struct bam_mux_hdr *hdr;
524 unsigned long flags;
525 int rc;
526
527 if (id >= BAM_DMUX_NUM_CHANNELS)
528 return -EINVAL;
529 DBG("%s: closing ch %d\n", __func__, id);
530 if (!bam_mux_initialized)
531 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700532
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600533 read_lock(&ul_wakeup_lock);
Jeff Hugo061ce672011-10-21 17:15:32 -0600534 if (!bam_is_connected) {
535 read_unlock(&ul_wakeup_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600536 ul_wakeup();
Jeff Hugo061ce672011-10-21 17:15:32 -0600537 read_lock(&ul_wakeup_lock);
538 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600539
Jeff Hugo061ce672011-10-21 17:15:32 -0600540 spin_lock_irqsave(&bam_ch[id].lock, flags);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600541 bam_ch[id].notify = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700542 bam_ch[id].priv = NULL;
543 bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
544 spin_unlock_irqrestore(&bam_ch[id].lock, flags);
545
546 hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
547 if (hdr == NULL) {
548 pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
549 return -ENOMEM;
550 }
551 hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
552 hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
553 hdr->reserved = 0;
554 hdr->ch_id = id;
555 hdr->pkt_len = 0;
556 hdr->pad_len = 0;
557
558 rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600559 read_unlock(&ul_wakeup_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700560
561 DBG("%s: closed ch %d\n", __func__, id);
562 return rc;
563}
564
/*
 * rx_timer_work_func() - drain the RX pipe while in polling mode.
 *
 * Entered (via rx_timer_work) when bam_mux_rx_notify() switches the RX
 * pipe to polling.  Loops pulling completed iovecs and processing their
 * buffers; after POLLING_INACTIVITY consecutive empty cycles it tries to
 * re-enable EOT interrupts and return to interrupt mode, re-checking the
 * pipe once afterwards to close the race with a packet that arrived while
 * interrupts were being re-enabled.  Sleeps ~1 ms between polling cycles.
 *
 * NOTE(review): when an iovec is available, bam_rx_pool is assumed
 * non-empty and its head is assumed to match the iovec -- confirm SPS
 * completes transfers strictly in submission order.
 */
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	struct sps_connect cur_rx_conn;

	while (1) { /* timer loop */
		++inactive_cycles;
		while (1) { /* deplete queue loop */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			/* pop the oldest submitted RX buffer */
			mutex_lock(&bam_rx_pool_lock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_lock);
			info = container_of(node, struct rx_pkt_info,
					list_node);
			/* process synchronously - we are already in a
			 * workqueue context */
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			/*
			 * attempt to enable interrupts in this pipe
			 * if enabling interrupts fails, continue polling
			 */
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed, interrupts"
					" not enabled\n", __func__);
				queue_work(bam_mux_rx_workqueue,
						&rx_timer_work);
				return;
			} else {
				rx_register_event.options = SPS_O_EOT;
				/* should check return value */
				sps_register_event(bam_rx_pipe,
						&rx_register_event);
				cur_rx_conn.options = SPS_O_AUTO_ENABLE |
					SPS_O_EOT | SPS_O_ACK_TRANSFERS;
				ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
				if (ret) {
					pr_err("%s: sps_set_config() failed, "
						"interrupts not enabled\n",
						__func__);
					queue_work(bam_mux_rx_workqueue,
							&rx_timer_work);
					return;
				}
				polling_mode = 0;
			}
			/* handle race condition - missed packet? */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				return;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_lock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_lock);
			info = container_of(node, struct rx_pkt_info,
					list_node);
			handle_bam_mux_cmd(&info->work);
			return;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}
638
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639static void bam_mux_tx_notify(struct sps_event_notify *notify)
640{
641 struct tx_pkt_info *pkt;
642
643 DBG("%s: event %d notified\n", __func__, notify->event_id);
644
645 switch (notify->event_id) {
646 case SPS_EVENT_EOT:
647 pkt = notify->data.transfer.user;
648 if (!pkt->is_cmd) {
649 dma_unmap_single(NULL, pkt->dma_address,
650 pkt->skb->len,
651 DMA_TO_DEVICE);
652 queue_work(bam_mux_tx_workqueue, &pkt->work);
653 } else {
654 dma_unmap_single(NULL, pkt->dma_address,
655 pkt->len,
656 DMA_TO_DEVICE);
657 kfree(pkt->skb);
658 kfree(pkt);
659 }
660 break;
661 default:
662 pr_err("%s: recieved unexpected event id %d\n", __func__,
663 notify->event_id);
664 }
665}
666
Jeff Hugo33dbc002011-08-25 15:52:53 -0600667static void bam_mux_rx_notify(struct sps_event_notify *notify)
668{
Jeff Hugo949080a2011-08-30 11:58:56 -0600669 int ret;
670 struct sps_connect cur_rx_conn;
Jeff Hugo33dbc002011-08-25 15:52:53 -0600671
672 DBG("%s: event %d notified\n", __func__, notify->event_id);
673
Jeff Hugo33dbc002011-08-25 15:52:53 -0600674 switch (notify->event_id) {
675 case SPS_EVENT_EOT:
Jeff Hugo949080a2011-08-30 11:58:56 -0600676 /* attempt to disable interrupts in this pipe */
677 if (!polling_mode) {
678 ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
679 if (ret) {
680 pr_err("%s: sps_get_config() failed, interrupts"
681 " not disabled\n", __func__);
682 break;
683 }
684 rx_register_event.options = 0;
685 ret = sps_register_event(bam_rx_pipe,
686 &rx_register_event);
687 if (ret) {
688 pr_err("%s: sps_register_event ret = %d\n",
689 __func__, ret);
690 break;
691 }
692 cur_rx_conn.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
693 SPS_O_ACK_TRANSFERS | SPS_O_POLL;
694 ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
695 if (ret) {
696 pr_err("%s: sps_set_config() failed, interrupts"
697 " not disabled\n", __func__);
698 break;
699 }
700 polling_mode = 1;
701 queue_work(bam_mux_rx_workqueue, &rx_timer_work);
702 }
Jeff Hugo33dbc002011-08-25 15:52:53 -0600703 break;
704 default:
705 pr_err("%s: recieved unexpected event id %d\n", __func__,
706 notify->event_id);
707 }
708}
709
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700710#ifdef CONFIG_DEBUG_FS
711
712static int debug_tbl(char *buf, int max)
713{
714 int i = 0;
715 int j;
716
717 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
718 i += scnprintf(buf + i, max - i,
719 "ch%02d local open=%s remote open=%s\n",
720 j, bam_ch_is_local_open(j) ? "Y" : "N",
721 bam_ch_is_remote_open(j) ? "Y" : "N");
722 }
723
724 return i;
725}
726
727#define DEBUG_BUFMAX 4096
728static char debug_buffer[DEBUG_BUFMAX];
729
730static ssize_t debug_read(struct file *file, char __user *buf,
731 size_t count, loff_t *ppos)
732{
733 int (*fill)(char *buf, int max) = file->private_data;
734 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
735 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
736}
737
/* debugfs open: stash the fill callback (set by debug_create()) so
 * debug_read() can find it. */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
743
744
/* Shared file_operations for all bam_dmux debugfs entries. */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
749
/*
 * debug_create() - register one debugfs file whose content is produced by
 * @fill.  The fill function is carried in the inode's private data.
 * The debugfs_create_file() result is deliberately ignored (best effort).
 */
static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
756
757#endif
758
/*
 * ul_timeout() - uplink inactivity timer (delayed work).
 *
 * If any uplink packet was written since the last expiry, clears the flag
 * and re-arms the timer.  Otherwise clears the SMSM_A2_POWER_CONTROL bit
 * (allowing A2 to power-collapse) and marks the link down.  The write
 * lock excludes concurrent writers holding the read side in
 * msm_bam_dmux_write()/open()/close().
 */
static void ul_timeout(struct work_struct *work)
{
	write_lock(&ul_wakeup_lock);
	if (ul_packet_written) {
		/* traffic seen this interval - stay awake, re-arm */
		ul_packet_written = 0;
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
	} else {
		/* idle - release the A2 power vote */
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
		bam_is_connected = 0;
	}
	write_unlock(&ul_wakeup_lock);
}
/*
 * ul_wakeup() - bring the A2 link back up before an uplink transfer.
 *
 * Serialized by wakeup_lock so only one caller performs the handshake:
 * requests power-up via the SMSM_A2_POWER_CONTROL bit, waits (bounded to
 * HZ each) for the remote ack and for the BAM connection to complete,
 * arms the inactivity timer, and marks the link connected.
 *
 * NOTE(review): the return values of both interruptible timed waits are
 * ignored, so bam_is_connected is set even on timeout or signal --
 * confirm this is intentional.
 */
static void ul_wakeup(void)
{
	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		mutex_unlock(&wakeup_lock);
		return;
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	wait_for_completion_interruptible_timeout(&ul_wakeup_ack_completion,
						HZ);
	wait_for_completion_interruptible_timeout(&bam_connection_completion,
						HZ);

	bam_is_connected = 1;
	schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}
791
792static void reconnect_to_bam(void)
793{
794 int i;
795
796 vote_dfab();
797 i = sps_device_reset(a2_device_handle);
798 if (i)
799 pr_err("%s: device reset failed rc = %d\n", __func__, i);
800 i = sps_connect(bam_tx_pipe, &tx_connection);
801 if (i)
802 pr_err("%s: tx connection failed rc = %d\n", __func__, i);
803 i = sps_connect(bam_rx_pipe, &rx_connection);
804 if (i)
805 pr_err("%s: rx connection failed rc = %d\n", __func__, i);
806 i = sps_register_event(bam_tx_pipe, &tx_register_event);
807 if (i)
808 pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
809 i = sps_register_event(bam_rx_pipe, &rx_register_event);
810 if (i)
811 pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
812 for (i = 0; i < NUM_BUFFERS; ++i)
813 queue_rx();
814 toggle_apps_ack();
815 complete_all(&bam_connection_completion);
816}
817
/*
 * disconnect_to_bam() - tear down both BAM pipes for A2 power collapse.
 *
 * Resets the connection completion so waiters block until reconnect,
 * disconnects both pipes, drops the dfab clock vote, clears both
 * descriptor FIFOs, and releases every RX buffer still queued in
 * bam_rx_pool (unmapping each DMA buffer first).
 *
 * NOTE(review): __memzero is an ARM-internal helper; memset would be the
 * portable choice -- confirm.
 * NOTE(review): bam_rx_pool is drained without bam_rx_pool_lock;
 * presumably safe because the pipes are already disconnected -- verify
 * against rx_timer_work_func() and queue_rx().
 */
static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;

	INIT_COMPLETION(bam_connection_completion);
	sps_disconnect(bam_tx_pipe);
	sps_disconnect(bam_rx_pipe);
	unvote_dfab();
	__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
	__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
				DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
}
839
840static void vote_dfab(void)
841{
842 int rc;
843
844 rc = clk_enable(dfab_clk);
845 if (rc)
846 pr_err("bam_dmux vote for dfab failed rc = %d\n", rc);
847}
848
/* Drop the dfab clock reference taken by vote_dfab(). */
static void unvote_dfab(void)
{
	clk_disable(dfab_clk);
}
853
Jeff Hugoade1f842011-08-03 15:53:59 -0600854static void bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855{
856 u32 h;
857 dma_addr_t dma_addr;
858 int ret;
859 void *a2_virt_addr;
860 int i;
861
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600862 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700863 /* init BAM */
864 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
865 if (!a2_virt_addr) {
866 pr_err("%s: ioremap failed\n", __func__);
867 ret = -ENOMEM;
868 goto register_bam_failed;
869 }
870 a2_props.phys_addr = A2_PHYS_BASE;
871 a2_props.virt_addr = a2_virt_addr;
872 a2_props.virt_size = A2_PHYS_SIZE;
873 a2_props.irq = A2_BAM_IRQ;
874 a2_props.num_pipes = A2_NUM_PIPES;
875 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
876 /* need to free on tear down */
877 ret = sps_register_bam_device(&a2_props, &h);
878 if (ret < 0) {
879 pr_err("%s: register bam error %d\n", __func__, ret);
880 goto register_bam_failed;
881 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600882 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883
884 bam_tx_pipe = sps_alloc_endpoint();
885 if (bam_tx_pipe == NULL) {
886 pr_err("%s: tx alloc endpoint failed\n", __func__);
887 ret = -ENOMEM;
888 goto register_bam_failed;
889 }
890 ret = sps_get_config(bam_tx_pipe, &tx_connection);
891 if (ret) {
892 pr_err("%s: tx get config failed %d\n", __func__, ret);
893 goto tx_get_config_failed;
894 }
895
896 tx_connection.source = SPS_DEV_HANDLE_MEM;
897 tx_connection.src_pipe_index = 0;
898 tx_connection.destination = h;
899 tx_connection.dest_pipe_index = 4;
900 tx_connection.mode = SPS_MODE_DEST;
901 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
902 tx_desc_mem_buf.size = 0x800; /* 2k */
903 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
904 &dma_addr, 0);
905 if (tx_desc_mem_buf.base == NULL) {
906 pr_err("%s: tx memory alloc failed\n", __func__);
907 ret = -ENOMEM;
908 goto tx_mem_failed;
909 }
910 tx_desc_mem_buf.phys_base = dma_addr;
911 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
912 tx_connection.desc = tx_desc_mem_buf;
913 tx_connection.event_thresh = 0x10;
914
915 ret = sps_connect(bam_tx_pipe, &tx_connection);
916 if (ret < 0) {
917 pr_err("%s: tx connect error %d\n", __func__, ret);
918 goto tx_connect_failed;
919 }
920
921 bam_rx_pipe = sps_alloc_endpoint();
922 if (bam_rx_pipe == NULL) {
923 pr_err("%s: rx alloc endpoint failed\n", __func__);
924 ret = -ENOMEM;
925 goto tx_connect_failed;
926 }
927 ret = sps_get_config(bam_rx_pipe, &rx_connection);
928 if (ret) {
929 pr_err("%s: rx get config failed %d\n", __func__, ret);
930 goto rx_get_config_failed;
931 }
932
933 rx_connection.source = h;
934 rx_connection.src_pipe_index = 5;
935 rx_connection.destination = SPS_DEV_HANDLE_MEM;
936 rx_connection.dest_pipe_index = 1;
937 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -0600938 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
939 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700940 rx_desc_mem_buf.size = 0x800; /* 2k */
941 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
942 &dma_addr, 0);
943 if (rx_desc_mem_buf.base == NULL) {
944 pr_err("%s: rx memory alloc failed\n", __func__);
945 ret = -ENOMEM;
946 goto rx_mem_failed;
947 }
948 rx_desc_mem_buf.phys_base = dma_addr;
949 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
950 rx_connection.desc = rx_desc_mem_buf;
951 rx_connection.event_thresh = 0x10;
952
953 ret = sps_connect(bam_rx_pipe, &rx_connection);
954 if (ret < 0) {
955 pr_err("%s: rx connect error %d\n", __func__, ret);
956 goto rx_connect_failed;
957 }
958
959 tx_register_event.options = SPS_O_EOT;
960 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
961 tx_register_event.xfer_done = NULL;
962 tx_register_event.callback = bam_mux_tx_notify;
963 tx_register_event.user = NULL;
964 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
965 if (ret < 0) {
966 pr_err("%s: tx register event error %d\n", __func__, ret);
967 goto rx_event_reg_failed;
968 }
969
Jeff Hugo33dbc002011-08-25 15:52:53 -0600970 rx_register_event.options = SPS_O_EOT;
971 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
972 rx_register_event.xfer_done = NULL;
973 rx_register_event.callback = bam_mux_rx_notify;
974 rx_register_event.user = NULL;
975 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
976 if (ret < 0) {
977 pr_err("%s: tx register event error %d\n", __func__, ret);
978 goto rx_event_reg_failed;
979 }
980
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 bam_mux_initialized = 1;
982 for (i = 0; i < NUM_BUFFERS; ++i)
983 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -0600984 toggle_apps_ack();
985 complete_all(&bam_connection_completion);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986 return;
987
988rx_event_reg_failed:
989 sps_disconnect(bam_rx_pipe);
990rx_connect_failed:
991 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
992 rx_desc_mem_buf.phys_base);
993rx_mem_failed:
994 sps_disconnect(bam_tx_pipe);
995rx_get_config_failed:
996 sps_free_endpoint(bam_rx_pipe);
997tx_connect_failed:
998 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
999 tx_desc_mem_buf.phys_base);
1000tx_get_config_failed:
1001 sps_free_endpoint(bam_tx_pipe);
1002tx_mem_failed:
1003 sps_deregister_bam_device(h);
1004register_bam_failed:
1005 /*destroy_workqueue(bam_mux_workqueue);*/
1006 /*return ret;*/
1007 return;
1008}
Jeff Hugoade1f842011-08-03 15:53:59 -06001009
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001010static void toggle_apps_ack(void)
1011{
1012 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
1013 smsm_change_state(SMSM_APPS_STATE,
1014 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
1015 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
1016 clear_bit = ~clear_bit;
1017}
1018
Jeff Hugoade1f842011-08-03 15:53:59 -06001019static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
1020{
1021 DBG("%s: smsm activity\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001022 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL)
1023 reconnect_to_bam();
1024 else if (bam_mux_initialized && !(new_state & SMSM_A2_POWER_CONTROL))
1025 disconnect_to_bam();
Jeff Hugoade1f842011-08-03 15:53:59 -06001026 else if (new_state & SMSM_A2_POWER_CONTROL)
1027 bam_init();
1028 else
1029 pr_err("%s: unsupported state change\n", __func__);
1030
1031}
1032
/*
 * SMSM callback for SMSM_A2_POWER_CONTROL_ACK transitions: the modem has
 * acknowledged an apps power-control change, so release every waiter
 * blocked on the uplink wakeup ack.  Fires on either edge of the ack bit.
 */
static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
						uint32_t new_state)
{
	complete_all(&ul_wakeup_ack_completion);
}
1038
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001039static int bam_dmux_probe(struct platform_device *pdev)
1040{
1041 int rc;
1042
1043 DBG("%s probe called\n", __func__);
1044 if (bam_mux_initialized)
1045 return 0;
1046
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001047 dfab_clk = clk_get(&pdev->dev, "dfab_clk");
1048 if (IS_ERR(dfab_clk)) {
1049 pr_err("%s: did not get dfab clock\n", __func__);
1050 return -EFAULT;
1051 }
1052
1053 rc = clk_set_rate(dfab_clk, 64000000);
1054 if (rc)
1055 pr_err("%s: unable to set dfab clock rate\n", __func__);
1056
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001057 bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
1058 if (!bam_mux_rx_workqueue)
1059 return -ENOMEM;
1060
1061 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
1062 if (!bam_mux_tx_workqueue) {
1063 destroy_workqueue(bam_mux_rx_workqueue);
1064 return -ENOMEM;
1065 }
1066
Jeff Hugo7960abd2011-08-02 15:39:38 -06001067 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001068 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06001069 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
1070 "bam_dmux_ch_%d", rc);
1071 /* bus 2, ie a2 stream 2 */
1072 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
1073 if (!bam_ch[rc].pdev) {
1074 pr_err("%s: platform device alloc failed\n", __func__);
1075 destroy_workqueue(bam_mux_rx_workqueue);
1076 destroy_workqueue(bam_mux_tx_workqueue);
1077 return -ENOMEM;
1078 }
1079 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001080
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001081 init_completion(&ul_wakeup_ack_completion);
1082 init_completion(&bam_connection_completion);
1083 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
1084
Jeff Hugoade1f842011-08-03 15:53:59 -06001085 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
1086 bam_dmux_smsm_cb, NULL);
1087
1088 if (rc) {
1089 destroy_workqueue(bam_mux_rx_workqueue);
1090 destroy_workqueue(bam_mux_tx_workqueue);
1091 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
1092 return -ENOMEM;
1093 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001094
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001095 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
1096 bam_dmux_smsm_ack_cb, NULL);
1097
1098 if (rc) {
1099 destroy_workqueue(bam_mux_rx_workqueue);
1100 destroy_workqueue(bam_mux_tx_workqueue);
1101 smsm_state_cb_deregister(SMSM_MODEM_STATE,
1102 SMSM_A2_POWER_CONTROL,
1103 bam_dmux_smsm_cb, NULL);
1104 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
1105 rc);
1106 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
1107 platform_device_put(bam_ch[rc].pdev);
1108 return -ENOMEM;
1109 }
1110
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001111 return 0;
1112}
1113
/*
 * Probe fires when a matching "BAM_RMNT" platform device is registered —
 * presumably by board/SMD setup code outside this file; confirm against
 * the platform device registration site.
 */
static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};
1121
1122static int __init bam_dmux_init(void)
1123{
1124#ifdef CONFIG_DEBUG_FS
1125 struct dentry *dent;
1126
1127 dent = debugfs_create_dir("bam_dmux", 0);
1128 if (!IS_ERR(dent))
1129 debug_create("tbl", 0444, dent, debug_tbl);
1130#endif
1131 return platform_driver_register(&bam_dmux_driver);
1132}
1133
/* SMD provides the SMSM services used above, hence the late initcall. */
late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");