/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2
#define BAM_CH_IN_RESET		0x4

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA		0
#define BAM_MUX_HDR_CMD_OPEN		1
#define BAM_MUX_HDR_CMD_CLOSE		2
#define BAM_MUX_HDR_CMD_STATUS		3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC	4

#define POLLING_MIN_SLEEP	950	/* 0.95 ms */
#define POLLING_MAX_SLEEP	1050	/* 1.05 ms */
#define POLLING_INACTIVITY	40	/* cycles before switch to intr mode */

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;

#define DBG(x...) do {		                 \
		if (msm_bam_dmux_debug_enable)  \
			pr_debug(x);            \
	} while (0)

#define DBG_INC_READ_CNT(x) do {	                               \
		bam_dmux_read_cnt += (x);                             \
		if (msm_bam_dmux_debug_enable)                        \
			pr_debug("%s: total read bytes %u\n",         \
				 __func__, bam_dmux_read_cnt);        \
	} while (0)

#define DBG_INC_WRITE_CNT(x)  do {	                               \
		bam_dmux_write_cnt += (x);                            \
		if (msm_bam_dmux_debug_enable)                        \
			pr_debug("%s: total written bytes %u\n",      \
				 __func__, bam_dmux_write_cnt);       \
	} while (0)

#define DBG_INC_WRITE_CPY(x)  do {	                                     \
		bam_dmux_write_cpy_bytes += (x);                            \
		bam_dmux_write_cpy_cnt++;                                   \
		if (msm_bam_dmux_debug_enable)                              \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,          \
				 bam_dmux_write_cpy_bytes);                 \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);

struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static struct clk *dfab_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static struct delayed_work msm9615_bam_init_work;
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * The complete size of the message is limited to LOG_MESSAGE_MAX_SIZE.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	unsigned long flags;
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 */
	len += scnprintf(buff, sizeof(buff),
			 "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c ",
			 (unsigned)t_now, nanosec_rem,
			 a2_pc_disabled ? 'D' : 'd',
			 in_global_reset ? 'R' : 'r',
			 bam_dmux_power_state ? 'P' : 'p',
			 bam_connection_is_active ? 'A' : 'a',
			 bam_dmux_uplink_vote ? 'V' : 'v',
			 bam_is_connected ? 'U' : 'u',
			 wait_for_ack ? 'W' : 'w',
			 ul_wakeup_ack_completion.done ? 'A' : 'a'
			 );

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
		char junk[LOG_MESSAGE_MAX_SIZE];
		int ret;

		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
		if (ret != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: unable to empty log %d\n", __func__, ret);
			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
					       flags);
			return;
		}
	}
	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

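/*
 * Record the current sched_clock() time in a tx packet, split into
 * whole seconds and the nanosecond remainder, for stall diagnostics.
 */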
static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

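/*
 * Called on powerdown and disconnect; any packet still sitting in
 * bam_tx_pool at that point indicates a stall, so every leftover node
 * is logged (and printed, unless in global reset) with its timestamp.
 */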
static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			     &info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
			       &info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

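/*
 * Allocate a single RX buffer, add it to bam_rx_pool, and submit its
 * DMA address to the SPS RX pipe for the A2 to fill.
 */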
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	if (in_global_reset)
		return;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info) {
		pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
		return;
	}

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	if (info->skb == NULL) {
		pr_err("%s: unable to alloc skb\n", __func__);
		kfree(info);
		return;
	}
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_mutexlock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_mutexlock);

	/* need a way to handle error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
					   DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
			 BUFFER_SIZE, info,
			 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}

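/*
 * Strip the mux header from a received data packet and hand the payload
 * to the channel's notify callback (or drop it if no callback is
 * registered), then replenish the RX pool.
 */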
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	queue_rx();
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
		       __func__, ret);
}

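/*
 * RX work function: validate the mux header of a completed RX buffer
 * and dispatch on the command type (data, open, open with A2 power
 * collapse disabled, close). Invalid headers are logged and dropped.
 */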
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
	    rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
	    rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			      " reserved %d cmd %d"
			      " pad %d ch %d len %d\n", __func__,
			      rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			      rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			      " reserved %d cmd %d"
			      " pad %d ch %d len %d\n", __func__,
			      rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			      rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
			     rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
			     rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			schedule_delayed_work(&ul_timeout_work,
					msecs_to_jiffies(UL_TIMEOUT_DELAY));
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
			     rx_hdr->ch_id);
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		queue_rx();
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		dev_kfree_skb_any(rx_skb);
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			      " reserved %d cmd %d pad %d ch %d len %d\n",
			      __func__, rx_hdr->magic_num, rx_hdr->reserved,
			      rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			      rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

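/*
 * Send a raw mux command (a header-only packet such as open/close) down
 * the TX pipe. The buffer is queued on bam_tx_pool and freed by
 * bam_mux_write_done() once the transfer completes.
 */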
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

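/*
 * TX completion work function. Completions must arrive in queue order;
 * a mismatch against the head of bam_tx_pool is a fatal consistency
 * error. For data packets, the per-channel in-flight count is
 * decremented and the client is notified of the write completion.
 */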
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
					 struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
			      " list_node=%p, ts=%u.%09lu\n",
			      __func__, bam_tx_pool.next, &info->list_node,
			      info->ts_sec, info->ts_nsec
			      );

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				      &errant_pkt->list_node,
				      errant_pkt->ts_sec,
				      errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->data_len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}

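/*
 * Queue an skb for transmission on an open channel. The skb is prefixed
 * with a mux header and padded to a 4-byte boundary (copied into a
 * larger skb if it lacks tailroom), then submitted to the TX pipe.
 * Returns -EAGAIN if watermark checking is enabled and the channel is
 * at its high watermark.
 */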
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
				     DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
			      pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

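/*
 * Open the local side of a channel. The remote side must already have
 * opened it; a BAM_MUX_HDR_CMD_OPEN command is then sent to the A2 to
 * complete the handshake.
 */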
int msm_bam_dmux_open(uint32_t id, void *priv,
		      void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

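/*
 * Watermark queries: calling either function opts the channel into
 * watermark-based flow control (use_wm) and reports whether the number
 * of in-flight tx packets is at the high watermark or has drained to
 * the low watermark.
 */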
int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}

	return ret;
}

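/*
 * Leave RX polling mode: re-enable EOT interrupts on the RX pipe,
 * release the wakelock, and drain any descriptors that completed before
 * interrupts were re-enabled. On failure, remain in polling mode and
 * requeue the polling work.
 */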
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
			       __func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
					list_node);
		list_del(&info->list_node);
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
}

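/*
 * RX polling loop: repeatedly drain completed RX descriptors, sleeping
 * roughly 1 ms between passes, and switch back to interrupt mode after
 * POLLING_INACTIVITY consecutive idle cycles.
 */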
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
				       __func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
						struct rx_pkt_info, list_node);
			list_del(&info->list_node);
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}

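/*
 * SPS event callbacks. A TX EOT unmaps the finished buffer and queues
 * the write-done work; an RX EOT switches the pipe into polling mode
 * and kicks off the polling worker.
 */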
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->skb->len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
					 pkt->len,
					 DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}

static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
				       " not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
				       " not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			queue_work(bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
		       notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, bam_ch_is_local_open(j) ? "Y" : "N",
			       bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
	struct list_head *p;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	__list_for_each(p, &bam_tx_pool) {
		++n;
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i,
		       "skb copy cnt: %u\n"
		       "skb copy bytes: %u\n"
		       "sps tx failures: %u\n"
		       "sps tx stalls: %u\n",
		       bam_dmux_write_cpy_cnt,
		       bam_dmux_write_cpy_bytes,
		       bam_dmux_tx_sps_failure_cnt,
		       bam_dmux_tx_stall_cnt
		       );

	return i;
}

static int debug_log(char *buff, int max, loff_t *ppos)
{
	unsigned long flags;
	int i = 0;

	if (bam_dmux_state_logging_disabled) {
		i += scnprintf(buff + i, max - i, "Logging disabled\n");
		return i;
	}

	if (*ppos == 0) {
		i += scnprintf(buff + i, max - i,
			       "<DMUX> timestamp FLAGS [Message]\n"
			       "FLAGS:\n"
			       "\tD: 1 = Power collapse disabled\n"
			       "\tR: 1 = in global reset\n"
			       "\tP: 1 = BAM is powered up\n"
			       "\tA: 1 = BAM initialized and ready for data\n"
			       "\n"
			       "\tV: 1 = Uplink vote for power\n"
			       "\tU: 1 = Uplink active\n"
			       "\tW: 1 = Uplink Wait-for-ack\n"
			       "\tA: 1 = Uplink ACK received\n"
			       );
		buff += i;
	}

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	while (kfifo_len(&bam_dmux_state_log)
	       && (i + LOG_MESSAGE_MAX_SIZE) < max) {
		int k_len;

		k_len = kfifo_out(&bam_dmux_state_log,
				  buff, LOG_MESSAGE_MAX_SIZE);
		if (k_len != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: retrieve failure %d\n", __func__, k_len);
			break;
		}

		/* keep non-null portion of string and add line break */
		k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
		buff += k_len;
		i += k_len;
		if (k_len && *(buff - 1) != '\n') {
			*buff++ = '\n';
			++i;
		}
	}
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static ssize_t debug_read_multiple(struct file *file, char __user *buff,
				   size_t count, loff_t *ppos)
{
	int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
	char *buffer;
	int bsize;

	buffer = kmalloc(count, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	bsize = util_func(buffer, count, ppos);

	if (bsize >= 0) {
		if (copy_to_user(buff, buffer, bsize)) {
			kfree(buffer);
			return -EFAULT;
		}
		*ppos += bsize;
	}
	kfree(buffer);
	return bsize;
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static const struct file_operations debug_ops_multiple = {
	.read = debug_read_multiple,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
		       (int)PTR_ERR(file));
}

static void debug_create_multiple(const char *name, mode_t mode,
				  struct dentry *dent,
				  int (*fill)(char *buf, int max, loff_t *ppos))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill,
				   &debug_ops_multiple);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
		       (int)PTR_ERR(file));
}

#endif

static void notify_all(int event, unsigned long data)
{
	int i;

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		if (bam_ch_is_open(i)) {
			bam_ch[i].notify(bam_ch[i].priv, event, data);
			bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
				     __func__, i, event, data);
		}
	}
}

static void kickoff_ul_wakeup_func(struct work_struct *work)
{
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		read_lock(&ul_wakeup_lock);
		ul_packet_written = 1;
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}
	read_unlock(&ul_wakeup_lock);
}

void msm_bam_dmux_kickoff_ul_wakeup(void)
{
	queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
}

static void power_vote(int vote)
{
	bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
		     bam_dmux_uplink_vote, vote);

	if (bam_dmux_uplink_vote == vote)
		bam_dmux_log("%s: warning - duplicate power vote\n", __func__);

	bam_dmux_uplink_vote = vote;
	if (vote)
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	else
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
}

/*
 * @note:  Must be called with ul_wakeup_lock locked.
 */
static inline void ul_powerdown(void)
{
	bam_dmux_log("%s: powerdown\n", __func__);
	verify_tx_queue_is_empty(__func__);

	if (a2_pc_disabled) {
		wait_for_dfab = 1;
		INIT_COMPLETION(dfab_unvote_completion);
		release_wakelock();
	} else {
		wait_for_ack = 1;
		INIT_COMPLETION(ul_wakeup_ack_completion);
		power_vote(0);
	}
	bam_is_connected = 0;
	notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
}

static inline void ul_powerdown_finish(void)
{
	if (a2_pc_disabled && wait_for_dfab) {
		unvote_dfab();
		complete_all(&dfab_unvote_completion);
		wait_for_dfab = 0;
	}
}

static void ul_timeout(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	if (in_global_reset)
		return;
	ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
	if (!ret) { /* failed to grab lock, reschedule and bail */
		schedule_delayed_work(&ul_timeout_work,
				      msecs_to_jiffies(UL_TIMEOUT_DELAY));
		return;
	}
	if (bam_is_connected) {
		if (!ul_packet_written) {
			spin_lock(&bam_tx_pool_spinlock);
			if (!list_empty(&bam_tx_pool)) {
				struct tx_pkt_info *info;

				info = list_first_entry(&bam_tx_pool,
						struct tx_pkt_info, list_node);
				DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
					      __func__, info->ts_sec,
					      info->ts_nsec);
				DBG_INC_TX_STALL_CNT();
				ul_packet_written = 1;
			}
			spin_unlock(&bam_tx_pool_spinlock);
		}

		if (ul_packet_written) {
			bam_dmux_log("%s: packet written\n", __func__);
			ul_packet_written = 0;
			schedule_delayed_work(&ul_timeout_work,
					msecs_to_jiffies(UL_TIMEOUT_DELAY));
		} else {
			ul_powerdown();
		}
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
}

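/*
 * Bring the uplink up. With A2 power collapse disabled this only
 * revotes dfab and re-arms the inactivity timer; otherwise it votes for
 * power and blocks until the A2 acks the wakeup and the BAM connection
 * completes. Every wait is timeout-bounded and a timeout is fatal.
 */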
static void ul_wakeup(void)
{
	int ret;
	static int called_before;

	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		bam_dmux_log("%s Already awake\n", __func__);
		mutex_unlock(&wakeup_lock);
		return;
	}

	if (a2_pc_disabled) {
		/*
		 * don't grab the wakelock the first time because it is
		 * already grabbed when a2 powers on
		 */
		if (likely(called_before))
			grab_wakelock();
		else
			called_before = 1;
		if (wait_for_dfab) {
			ret = wait_for_completion_timeout(
					&dfab_unvote_completion, HZ);
			BUG_ON(ret == 0);
		}
		vote_dfab();
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		bam_is_connected = 1;
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * must wait for the previous power down request to have been acked;
	 * chances are it already came in and this will just fall through
	 * instead of waiting
	 */
	if (wait_for_ack) {
		bam_dmux_log("%s waiting for previous ack\n", __func__);
		ret = wait_for_completion_timeout(
					&ul_wakeup_ack_completion, HZ);
		BUG_ON(ret == 0);
		wait_for_ack = 0;
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	power_vote(1);
	bam_dmux_log("%s waiting for wakeup ack\n", __func__);
	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
	BUG_ON(ret == 0);
	bam_dmux_log("%s waiting completion\n", __func__);
	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
	BUG_ON(ret == 0);

	bam_is_connected = 1;
	bam_dmux_log("%s complete\n", __func__);
	schedule_delayed_work(&ul_timeout_work,
			msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}

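/*
 * reconnect_to_bam() - restore the BAM connection after A2 powers back up
 *
 * Resets the device and re-runs the sps connect/register sequence using
 * the connection state saved by bam_init(), refills the rx pool, and acks
 * the power-up request to A2.
 */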
static void reconnect_to_bam(void)
{
	int i;

	in_global_reset = 0;
	vote_dfab();
	i = sps_device_reset(a2_device_handle);
	if (i)
		pr_err("%s: device reset failed rc = %d\n", __func__, i);
	i = sps_connect(bam_tx_pipe, &tx_connection);
	if (i)
		pr_err("%s: tx connection failed rc = %d\n", __func__, i);
	i = sps_connect(bam_rx_pipe, &rx_connection);
	if (i)
		pr_err("%s: rx connection failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (i)
		pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
	i = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (i)
		pr_err("%s: rx event reg failed rc = %d\n", __func__, i);

	bam_connection_is_active = 1;

	if (polling_mode)
		rx_switch_to_interrupt_mode();

	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();

	toggle_apps_ack();
	complete_all(&bam_connection_completion);
}

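/*
 * disconnect_to_bam() - tear down the BAM connection for A2 power collapse
 *
 * Forces any active uplink into powerdown, disconnects both pipes, drops
 * the dfab vote, and frees every queued rx buffer so the pools are empty
 * when the connection is rebuilt.
 */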
static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;
	unsigned long flags;

	bam_connection_is_active = 0;

	/* handle disconnect during active UL */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
		ul_powerdown();
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();

	/* tear down BAM connection */
	INIT_COMPLETION(bam_connection_completion);
	sps_disconnect(bam_tx_pipe);
	sps_disconnect(bam_rx_pipe);
	unvote_dfab();
	__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
	__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);

	mutex_lock(&bam_rx_pool_mutexlock);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
	mutex_unlock(&bam_rx_pool_mutexlock);

	verify_tx_queue_is_empty(__func__);
}

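/*
 * vote_dfab()/unvote_dfab() gate the dfab fabric clock on and off around
 * BAM activity; dfab_is_on guards against double enables and disables.
 */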
static void vote_dfab(void)
{
	int rc;

	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (dfab_is_on) {
		bam_dmux_log("%s: dfab is already on\n", __func__);
		mutex_unlock(&dfab_status_lock);
		return;
	}
	rc = clk_prepare_enable(dfab_clk);
	if (rc)
		DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
	dfab_is_on = 1;
	mutex_unlock(&dfab_status_lock);
}

static void unvote_dfab(void)
{
	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (!dfab_is_on) {
		DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
		dump_stack();
		mutex_unlock(&dfab_status_lock);
		return;
	}
	clk_disable_unprepare(dfab_clk);
	dfab_is_on = 0;
	mutex_unlock(&dfab_status_lock);
}

/* reference counting wrapper around wakelock */
static void grab_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	bam_dmux_log("%s: ref count = %d\n", __func__,
						wakelock_reference_count);
	if (wakelock_reference_count == 0)
		wake_lock(&bam_wakelock);
	++wakelock_reference_count;
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

static void release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	if (wakelock_reference_count == 0) {
		DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
		dump_stack();
		spin_unlock_irqrestore(&wakelock_reference_lock, flags);
		return;
	}
	bam_dmux_log("%s: ref count = %d\n", __func__,
						wakelock_reference_count);
	--wakelock_reference_count;
	if (wakelock_reference_count == 0)
		wake_unlock(&bam_wakelock);
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

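/*
 * restart_notifier_cb() - modem subsystem restart hook
 *
 * Runs after the modem has shut down: forces the uplink into powerdown,
 * marks remote channels closed (re-allocating their platform devices for
 * later re-registration), and frees every packet still queued for tx.
 */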
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	int i;
	struct list_head *node;
	struct tx_pkt_info *info;
	int temp_remote_status;
	unsigned long flags;

	if (code != SUBSYS_AFTER_SHUTDOWN)
		return NOTIFY_DONE;

	bam_dmux_log("%s: begin\n", __func__);
	in_global_reset = 1;

	/* Handle uplink Powerdown */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		ul_powerdown();
		wait_for_ack = 0;
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
	a2_pc_disabled = 0;

	/* Cleanup Channel States */
	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		temp_remote_status = bam_ch_is_remote_open(i);
		bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
		bam_ch[i].num_tx_pkts = 0;
		if (bam_ch_is_local_open(i))
			bam_ch[i].status |= BAM_CH_IN_RESET;
		if (temp_remote_status) {
			platform_device_unregister(bam_ch[i].pdev);
			bam_ch[i].pdev = platform_device_alloc(
						bam_ch[i].name, 2);
		}
	}

	/* Cleanup pending UL data */
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	while (!list_empty(&bam_tx_pool)) {
		node = bam_tx_pool.next;
		list_del(node);
		info = container_of(node, struct tx_pkt_info,
							list_node);
		if (!info->is_cmd) {
			dma_unmap_single(NULL, info->dma_address,
						info->skb->len,
						DMA_TO_DEVICE);
			dev_kfree_skb_any(info->skb);
		} else {
			dma_unmap_single(NULL, info->dma_address,
						info->len,
						DMA_TO_DEVICE);
			kfree(info->skb);
		}
		kfree(info);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	bam_dmux_log("%s: complete\n", __func__);
	return NOTIFY_DONE;
}

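/*
 * bam_init() - full one-time bring-up of the A2 BAM
 *
 * Maps the A2 register space, registers the BAM with the sps driver, then
 * sets up the tx (apps->A2, pipe 4) and rx (A2->apps, pipe 5) connections
 * and their EOT event callbacks before priming the rx pool.
 */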
static int bam_init(void)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int i;
	int skip_iounmap = 0;

	vote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615())
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_connect_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	for (i = 0; i < NUM_BUFFERS; ++i)
		queue_rx();
	toggle_apps_ack();
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);
	return 0;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
	sps_disconnect(bam_tx_pipe);
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_mem_failed:
	sps_deregister_bam_device(h);
	/*
	 * sps_deregister_bam_device() calls iounmap, so calling iounmap on
	 * the same mapping below would cause a crash; skip it if we've
	 * already torn down the device here.
	 */
	skip_iounmap = 1;
register_bam_failed:
	if (!skip_iounmap)
		iounmap(a2_virt_addr);
ioremap_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	return ret;
}

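/*
 * bam_init_fallback() - minimal init when the full bring-up fails
 *
 * Registers the BAM device without setting up any data pipes, so the A2
 * power-control handshake can still be serviced; no traffic will flow.
 */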
static int bam_init_fallback(void)
{
	u32 h;
	int ret;
	void *a2_virt_addr;

	unvote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615())
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	return 0;

register_bam_failed:
	iounmap(a2_virt_addr);
ioremap_failed:
	return ret;
}

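/*
 * Deferred-work wrapper used on msm9615, where bam_init() must run a
 * short delay after the SMSM power-control signal (see bam_dmux_smsm_cb).
 */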
static void msm9615_bam_init(struct work_struct *work)
{
	int ret = 0;

	ret = bam_init();
	if (ret) {
		ret = bam_init_fallback();
		if (ret)
			pr_err("%s: bam init fallback failed: %d\n",
					__func__, ret);
	}
}

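/*
 * toggle_apps_ack() - ack an A2 power control request
 *
 * The handshake is edge based: each request is acked by flipping the
 * SMSM_A2_POWER_CONTROL_ACK bit, alternating set and clear on each call.
 */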
static void toggle_apps_ack(void)
{
	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */

	bam_dmux_log("%s: apps ack %d->%d\n", __func__,
			clear_bit & 0x1, ~clear_bit & 0x1);
	smsm_change_state(SMSM_APPS_STATE,
				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
	clear_bit = ~clear_bit;
}

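/*
 * bam_dmux_smsm_cb() - A2 power control state change handler
 *
 * Dispatches on the SMSM_A2_POWER_CONTROL bit: the first assertion does
 * the initial bam_init(), later assertions reconnect, and deassertion
 * tears the connection down.
 */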
static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
{
	bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);

	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: reconnect\n", __func__);
		grab_wakelock();
		reconnect_to_bam();
	} else if (bam_mux_initialized &&
					!(new_state & SMSM_A2_POWER_CONTROL)) {
		bam_dmux_log("%s: disconnect\n", __func__);
		disconnect_to_bam();
		release_wakelock();
	} else if (new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: init\n", __func__);
		grab_wakelock();
		if (cpu_is_msm9615()) {
			/*
			 * even though a2 has signaled it is ready via the
			 * SMSM_A2_POWER_CONTROL bit, it has not yet
			 * enabled the pipes as needed by sps_connect
			 * in satellite mode. Add a short delay to give the
			 * modem time to enable the pipes.
			 */
			schedule_delayed_work(&msm9615_bam_init_work,
						msecs_to_jiffies(100));
		} else {
			bam_init();
		}
	} else {
		bam_dmux_log("%s: bad state change\n", __func__);
		pr_err("%s: unsupported state change\n", __func__);
	}
}

static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
						uint32_t new_state)
{
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	complete_all(&ul_wakeup_ack_completion);
}

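/*
 * bam_dmux_probe() - one-time driver setup
 *
 * Grabs and rates the dfab clock, creates the rx/tx workqueues and the
 * per-channel platform devices, then registers for both A2 power-control
 * SMSM bits. The BAM itself is not brought up until A2 asserts
 * SMSM_A2_POWER_CONTROL.
 */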
static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	dfab_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(dfab_clk)) {
		pr_err("%s: did not get dfab clock\n", __func__);
		return -EFAULT;
	}

	rc = clk_set_rate(dfab_clk, 64000000);
	if (rc)
		pr_err("%s: unable to set dfab clock rate\n", __func__);

	bam_mux_rx_workqueue = create_singlethread_workqueue("bam_dmux_rx");
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
		spin_lock_init(&bam_ch[rc].lock);
		scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
					"bam_dmux_ch_%d", rc);
		/* bus 2, ie a2 stream 2 */
		bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
		if (!bam_ch[rc].pdev) {
			pr_err("%s: platform device alloc failed\n", __func__);
			destroy_workqueue(bam_mux_rx_workqueue);
			destroy_workqueue(bam_mux_tx_workqueue);
			return -ENOMEM;
		}
	}

	init_completion(&ul_wakeup_ack_completion);
	init_completion(&bam_connection_completion);
	init_completion(&dfab_unvote_completion);
	INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
	INIT_DELAYED_WORK(&msm9615_bam_init_work, msm9615_bam_init);
	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		return -ENOMEM;
	}

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
					bam_dmux_smsm_ack_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		smsm_state_cb_deregister(SMSM_MODEM_STATE,
					SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);
		pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
				rc);
		for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
			platform_device_put(bam_ch[rc].pdev);
		return -ENOMEM;
	}

	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));

	return 0;
}

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};

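/*
 * Module init: create the debugfs nodes and state log, hook modem restart
 * notifications, and register the platform driver for "BAM_RMNT".
 */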
static int __init bam_dmux_init(void)
{
	int ret;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent)) {
		debug_create("tbl", 0444, dent, debug_tbl);
		debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
		debug_create("stats", 0444, dent, debug_stats);
		debug_create_multiple("log", 0444, dent, debug_log);
	}
#endif
	ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
	if (ret) {
		pr_err("%s: failed to allocate log %d\n", __func__, ret);
		bam_dmux_state_logging_disabled = 1;
	}

	subsys_notif_register_notifier("modem", &restart_notifier);
	return platform_driver_register(&bam_dmux_driver);
}

late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");