/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA	0
#define BAM_MUX_HDR_CMD_OPEN	1
#define BAM_MUX_HDR_CMD_CLOSE	2

#define POLLING_MIN_SLEEP	950	/* 0.95 ms */
#define POLLING_MAX_SLEEP	1050	/* 1.05 ms */
#define POLLING_INACTIVITY	40	/* cycles before switch to intr mode */

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;

#define DBG(x...) do { \
		if (msm_bam_dmux_debug_enable) \
			pr_debug(x); \
	} while (0)

#define DBG_INC_READ_CNT(x) do { \
		bam_dmux_read_cnt += (x); \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total read bytes %u\n", \
				 __func__, bam_dmux_read_cnt); \
	} while (0)

#define DBG_INC_WRITE_CNT(x) do { \
		bam_dmux_write_cnt += (x); \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt); \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do { \
		bam_dmux_write_cpy_bytes += (x); \
		bam_dmux_write_cpy_cnt++; \
		if (msm_bam_dmux_debug_enable) \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt, \
				 bam_dmux_write_cpy_bytes); \
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct sps_bam_props a2_props;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_lock);

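/*
 * Header prepended to every frame exchanged with the A2.  Command frames
 * (OPEN/CLOSE) carry no payload; DATA frames are followed by pkt_len
 * payload bytes plus pad_len padding bytes so the frame stays word
 * aligned.
 */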
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DEFINE_MUTEX(bam_mux_lock);
static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

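/*
 * Allocate and DMA-map one receive buffer, add it to the rx pool, and
 * hand the descriptor to the BAM so the A2 has somewhere to place
 * incoming data.  Called NUM_BUFFERS times at init and once more each
 * time a buffer is consumed.
 */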
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info)
		return; /* need better way to handle this */

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_lock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_lock);

	/* need a way to handle error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
					   DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
			 BUFFER_SIZE, info,
			 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}

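/*
 * Strip the mux header from a DATA frame and pass the skb up through the
 * channel's notify callback, or drop it if no client is registered; then
 * replace the consumed rx buffer.
 */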
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

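/*
 * Work function run for each completed rx buffer: unmap the buffer,
 * validate the mux header, and dispatch on the command type (DATA,
 * OPEN, or CLOSE).  Remote OPEN/CLOSE also add or recycle the channel's
 * platform device so clients can bind to it.
 */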
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;
	int ret;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
	    rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
	    rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
		if (ret)
			pr_err("%s: platform_device_add() error: %d\n",
			       __func__, ret);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
		       " pad %d ch %d len %d\n", __func__,
		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
		       rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

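/*
 * Queue a command frame (header only, no payload) for transmission.
 * The buffer is stashed in pkt->skb with is_cmd set, so the tx
 * completion path kfree()s it rather than treating it as an skb.
 */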
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;

	mutex_lock(&bam_mux_lock);
	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);	/* don't leak the packet info on error */
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}
310 pkt->skb = (struct sk_buff *)(data);
311 pkt->len = len;
312 pkt->dma_address = dma_address;
313 pkt->is_cmd = 1;
314 rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
315 pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
316
317 mutex_unlock(&bam_mux_lock);
318 return rc;
319}
320
321static void bam_mux_write_done(struct work_struct *work)
322{
323 struct sk_buff *skb;
324 struct bam_mux_hdr *hdr;
325 struct tx_pkt_info *info;
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600326 unsigned long event_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700327
328 info = container_of(work, struct tx_pkt_info, work);
329 skb = info->skb;
330 kfree(info);
331 hdr = (struct bam_mux_hdr *)skb->data;
332 DBG_INC_WRITE_CNT(skb->data_len);
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600333 event_data = (unsigned long)(skb);
334 if (bam_ch[hdr->ch_id].notify)
335 bam_ch[hdr->ch_id].notify(
336 bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
337 event_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700338 else
339 dev_kfree_skb_any(skb);
340}
341
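/*
 * Queue a data skb for transmission on an open channel.  The skb is
 * expanded if there is no tailroom for padding, a mux header is pushed
 * in front of the data, and ownership of the skb passes to the driver
 * until BAM_DMUX_WRITE_DONE is delivered through the client's notify
 * callback.
 */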
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		return -ENOMEM;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	return rc;
}

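/*
 * Open a local channel: record the client's callback, mark the channel
 * locally open, and send an OPEN command so the remote side knows the
 * channel is in use.  Fails with -ENODEV until the remote end has sent
 * its own OPEN for this channel.
 */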
int msm_bam_dmux_open(uint32_t id, void *priv,
		      void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (notify == NULL)
		return -EINVAL;

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		rc = -ENODEV;
		goto open_done;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&bam_ch[id].lock, flags);

	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

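/*
 * Polling-mode rx loop.  While packets keep arriving, drain completed
 * descriptors directly with sps_get_iovec() and sleep roughly 1 ms
 * between passes; after POLLING_INACTIVITY idle cycles, re-enable the
 * EOT interrupt and return to interrupt-driven rx.  A final
 * sps_get_iovec() after re-arming catches a packet that may have raced
 * in while the interrupt was still disabled.
 */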
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	struct sps_connect cur_rx_conn;

	while (1) { /* timer loop */
		++inactive_cycles;
		while (1) { /* deplete queue loop */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_lock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_lock);
			info = container_of(node, struct rx_pkt_info,
					    list_node);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			/*
			 * attempt to enable interrupts in this pipe
			 * if enabling interrupts fails, continue polling
			 */
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed, interrupts"
				       " not enabled\n", __func__);
				queue_work(bam_mux_rx_workqueue,
					   &rx_timer_work);
				return;
			} else {
				rx_register_event.options = SPS_O_EOT;
				/* should check return value */
				sps_register_event(bam_rx_pipe,
						   &rx_register_event);
				cur_rx_conn.options = SPS_O_AUTO_ENABLE |
					SPS_O_EOT | SPS_O_ACK_TRANSFERS;
				ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
				if (ret) {
					pr_err("%s: sps_set_config() failed, "
					       "interrupts not enabled\n",
					       __func__);
					queue_work(bam_mux_rx_workqueue,
						   &rx_timer_work);
					return;
				}
				polling_mode = 0;
			}
			/* handle race condition - missed packet? */
			sps_get_iovec(bam_rx_pipe, &iov);
			if (iov.addr == 0)
				return;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_lock);
			node = bam_rx_pool.next;
			list_del(node);
			mutex_unlock(&bam_rx_pool_lock);
			info = container_of(node, struct rx_pkt_info,
					    list_node);
			handle_bam_mux_cmd(&info->work);
			return;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}

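/*
 * BAM tx completion callback: unmap the finished transfer, then either
 * schedule bam_mux_write_done() for data packets or free the command
 * buffer directly.
 */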
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd) {
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->skb->len,
					 DMA_TO_DEVICE);
			queue_work(bam_mux_tx_workqueue, &pkt->work);
		} else {
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->len,
					 DMA_TO_DEVICE);
			kfree(pkt->skb);
			kfree(pkt);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}

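/*
 * BAM rx interrupt callback.  On the first EOT the pipe is switched to
 * polled operation (event disabled, SPS_O_POLL set) and rx_timer_work
 * takes over draining the pipe, trading interrupt overhead for polling
 * latency while traffic is flowing.
 */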
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed, interrupts"
				       " not disabled\n", __func__);
				break;
			}
			rx_register_event.options = 0;
			ret = sps_register_event(bam_rx_pipe,
						 &rx_register_event);
			if (ret) {
				pr_err("%s: sps_register_event ret = %d\n",
				       __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed, interrupts"
				       " not disabled\n", __func__);
				break;
			}
			polling_mode = 1;
			queue_work(bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, bam_ch_is_local_open(j) ? "Y" : "N",
			       bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

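/*
 * Bring up the A2 BAM: map the device, register it with the SPS driver,
 * set up one tx and one rx pipe with system-memory descriptor FIFOs,
 * register EOT callbacks for both pipes, and prime the rx pipe with
 * NUM_BUFFERS receive buffers.
 */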
static void bam_init(void)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int i;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_connect_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
						  &dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();
	return;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
			  rx_desc_mem_buf.phys_base);
rx_mem_failed:
	sps_disconnect(bam_tx_pipe);
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
			  tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_mem_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	/*return ret;*/
	return;
}
Jeff Hugoade1f842011-08-03 15:53:59 -0600861
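/*
 * SMSM state callback: bam_init() is deferred until the modem sets
 * SMSM_A2_POWER_CONTROL; a second notification after initialization is
 * treated as an error.
 */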
static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
{
	DBG("%s: smsm activity\n", __func__);
	if (bam_mux_initialized)
		pr_err("%s: bam_dmux already initialized\n", __func__);
	else if (new_state & SMSM_A2_POWER_CONTROL)
		bam_init();
	else
		pr_err("%s: unsupported state change\n", __func__);
}

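/*
 * Platform probe: create the rx/tx workqueues, allocate a platform
 * device per mux channel (registered later, when the remote OPEN
 * arrives), and register for the SMSM notification that triggers
 * bam_init().
 */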
static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
		spin_lock_init(&bam_ch[rc].lock);
		scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
			  "bam_dmux_ch_%d", rc);
		/* bus 2, ie a2 stream 2 */
		bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
		if (!bam_ch[rc].pdev) {
			pr_err("%s: platform device alloc failed\n", __func__);
			destroy_workqueue(bam_mux_rx_workqueue);
			destroy_workqueue(bam_mux_tx_workqueue);
			return -ENOMEM;
		}
	}

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
				    bam_dmux_smsm_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		return -ENOMEM;
	}

	return 0;
}

static struct platform_driver bam_dmux_driver = {
	.probe		= bam_dmux_probe,
	.driver		= {
		.name	= "BAM_RMNT",
		.owner	= THIS_MODULE,
	},
};

static int __init bam_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&bam_dmux_driver);
}

late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");