/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <mach/msm_ipc_logging.h>
#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define BAM_MUX_HDR_MAGIC_NO 0x33fc

#define BAM_MUX_HDR_CMD_DATA 0
#define BAM_MUX_HDR_CMD_OPEN 1
#define BAM_MUX_HDR_CMD_CLOSE 2
#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4


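/*
 * Per-channel TX flow-control watermarks (in queued packets) and the
 * microsecond sleep bounds used by the adaptive RX polling loop below.
 */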
#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 1050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 40;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled = 1;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define BUFFER_SIZE 2048
#define DEFAULT_NUM_BUFFERS 32

#ifndef A2_BAM_IRQ
#define A2_BAM_IRQ -1
#endif

static void *a2_phys_base;
static uint32_t a2_phys_size;
static int a2_bam_irq;
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;
static bool satellite_mode;
static uint32_t num_buffers;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

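/*
 * Multiplexing header prepended to every packet carried over the shared
 * BAM pipe; ch_id selects the logical channel, cmd distinguishes data
 * from control packets, and pad_len records the 4-byte alignment padding.
 */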
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
static struct delayed_work queue_rx_work;

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;
static int in_ssr;
static int ssr_skipped_disconnect;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

struct kfifo bam_dmux_state_log;
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void *bam_ipc_log_txt;

#define BAM_IPC_LOG_PAGES 5

/**
 * Log a state change along with a small message.
 * Complete size of message is limited to @todo.
 * Logging is done using IPC Logging infrastructure.
 *
 * States
 * D: 1 = Power collapse disabled
 * R: 1 = in global reset
 * P: 1 = BAM is powered up
 * A: 1 = BAM initialized and ready for data
 * V: 1 = Uplink vote for power
 * U: 1 = Uplink active
 * W: 1 = Uplink Wait-for-ack
 * A: 1 = Uplink ACK received
 * #: >=1 On-demand uplink vote
 * D: 1 = Disconnect ACK active
 */

#define BAM_DMUX_LOG(fmt, args...) \
do { \
	if (bam_ipc_log_txt) { \
		ipc_log_string(bam_ipc_log_txt, \
			"<DMUX> %c%c%c%c %c%c%c%c%d%c " fmt, \
			a2_pc_disabled ? 'D' : 'd', \
			in_global_reset ? 'R' : 'r', \
			bam_dmux_power_state ? 'P' : 'p', \
			bam_connection_is_active ? 'A' : 'a', \
			bam_dmux_uplink_vote ? 'V' : 'v', \
			bam_is_connected ? 'U' : 'u', \
			wait_for_ack ? 'W' : 'w', \
			ul_wakeup_ack_completion.done ? 'A' : 'a', \
			atomic_read(&ul_ondemand_vote), \
			disconnect_ack ? 'D' : 'd', \
			args); \
	} \
} while (0)

#define DMUX_LOG_KERR(fmt, args...) \
do { \
	BAM_DMUX_LOG(fmt, args); \
	pr_err(fmt, args); \
} while (0)

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			BAM_DMUX_LOG("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		BAM_DMUX_LOG("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

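/*
 * Replenish the RX descriptor pool: allocate and DMA-map receive skbs and
 * submit them to the BAM RX pipe until num_buffers transfers are
 * outstanding. If allocation fails while the pool is empty, a retry is
 * scheduled through queue_rx_work.
 */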
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (bam_connection_is_active && rx_len_cached < num_buffers) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info),
				GFP_NOWAIT | __GFP_NOWARN);
		if (!info) {
			DMUX_LOG_KERR(
				"%s: unable to alloc rx_pkt_info, will retry later\n",
				__func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE,
				GFP_NOWAIT | __GFP_NOWARN);
		if (info->skb == NULL) {
			DMUX_LOG_KERR(
				"%s: unable to alloc skb, will retry later\n",
				__func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
				DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
				BUFFER_SIZE, info,
				SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
					DMA_FROM_DEVICE);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);

	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0 && !in_global_reset) {
		DMUX_LOG_KERR("%s: rescheduling\n", __func__);
		schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
	}
}

static void queue_rx_work_func(struct work_struct *work)
{
	queue_rx();
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
			event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		BAM_DMUX_LOG("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

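/*
 * Workqueue handler for a completed RX buffer: validate the mux header,
 * hand DATA payloads to the owning channel's notify callback, and process
 * OPEN, OPEN_NO_A2_PC and CLOSE control commands from the remote side.
 */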
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		BAM_DMUX_LOG("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			BAM_DMUX_LOG("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		BAM_DMUX_LOG("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		BAM_DMUX_LOG("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			BAM_DMUX_LOG("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

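/*
 * TX completion work. Completions are expected in submission order, so a
 * packet that is not at the head of bam_tx_pool indicates a stalled or
 * corrupted pipe and is treated as fatal.
 */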
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				&errant_pkt->list_node, errant_pkt->ts_sec,
				errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
			event_data);
	else
		dev_kfree_skb_any(skb);
}

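/*
 * Queue an skb for transmission on a logical channel: wake the uplink if
 * necessary, prepend a bam_mux_hdr, pad the payload to a 4-byte boundary,
 * DMA-map the buffer, and submit it to the BAM TX pipe.
 */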
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	skb_pull(skb, sizeof(struct bam_mux_hdr));
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

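/*
 * Open a logical channel: record the local open state and send an OPEN
 * command to the remote side. Fails with -ENODEV if the remote end has
 * not already opened the channel.
 */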
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

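/*
 * Watermark queries used for flow control. Calling either function also
 * enables watermark enforcement (use_wm) for subsequent writes on the
 * channel.
 */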
int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

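/*
 * Leave polling mode: re-enable RX pipe interrupts, release the wakelock,
 * and drain any descriptors that completed while interrupts were masked.
 */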
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
				__func__, (void *)iov.addr);
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		if (info->dma_address != iov.addr) {
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)iov.addr,
				(void *)info->dma_address);
			list_for_each_entry(info, &bam_rx_pool, list_node) {
				DMUX_LOG_KERR("%s: dma %p\n", __func__,
					(void *)info->dma_address);
				if (iov.addr == info->dma_address)
					break;
			}
		}
		BUG_ON(info->dma_address != iov.addr);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	u32 buffs_unused, buffs_used;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				DMUX_LOG_KERR(
					"%s: have iovec %p but rx pool empty\n",
					__func__, (void *)iov.addr);
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			if (info->dma_address != iov.addr) {
				DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
					__func__,
					(void *)iov.addr,
					(void *)info->dma_address);
				list_for_each_entry(info, &bam_rx_pool,
						list_node) {
					DMUX_LOG_KERR("%s: dma %p\n", __func__,
						(void *)info->dma_address);
					if (iov.addr == info->dma_address)
						break;
				}
			}
			BUG_ON(info->dma_address != iov.addr);
			list_del(&info->list_node);
			--bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles >= POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		if (bam_adaptive_timer_enabled) {
			usleep_range(rx_timer_interval, rx_timer_interval + 50);

			ret = sps_get_unused_desc_num(bam_rx_pipe,
						&buffs_unused);

			if (ret) {
				pr_err("%s: error getting num buffers unused after sleep\n",
					__func__);

				break;
			}

			buffs_used = num_buffers - buffs_unused;

			if (buffs_unused == 0) {
				rx_timer_interval = MIN_POLLING_SLEEP;
			} else {
				if (buffs_used > 0) {
					rx_timer_interval =
						(2 * num_buffers *
							rx_timer_interval)/
						(3 * buffs_used);
				} else {
					rx_timer_interval =
						MAX_POLLING_SLEEP;
				}
			}

			if (rx_timer_interval > MAX_POLLING_SLEEP)
				rx_timer_interval = MAX_POLLING_SLEEP;
			else if (rx_timer_interval < MIN_POLLING_SLEEP)
				rx_timer_interval = MIN_POLLING_SLEEP;
		} else {
			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
		}
	}
}

static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			/*
			 * run on core 0 so that netif_rx() in rmnet uses only
			 * one queue
			 */
			queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001283#ifdef CONFIG_DEBUG_FS
1284
1285static int debug_tbl(char *buf, int max)
1286{
1287 int i = 0;
1288 int j;
1289
1290 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1291 i += scnprintf(buf + i, max - i,
1292 "ch%02d local open=%s remote open=%s\n",
1293 j, bam_ch_is_local_open(j) ? "Y" : "N",
1294 bam_ch_is_remote_open(j) ? "Y" : "N");
1295 }
1296
1297 return i;
1298}
1299
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001300static int debug_ul_pkt_cnt(char *buf, int max)
1301{
1302 struct list_head *p;
1303 unsigned long flags;
1304 int n = 0;
1305
1306 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1307 __list_for_each(p, &bam_tx_pool) {
1308 ++n;
1309 }
1310 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1311
1312 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1313}
1314
1315static int debug_stats(char *buf, int max)
1316{
1317 int i = 0;
1318
1319 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001320 "skb read cnt: %u\n"
1321 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001322 "skb copy cnt: %u\n"
1323 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001324 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001325 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001326 "rx queue len: %d\n"
1327 "a2 ack out cnt: %d\n"
1328 "a2 ack in cnt: %d\n"
1329 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001330 bam_dmux_read_cnt,
1331 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001332 bam_dmux_write_cpy_cnt,
1333 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001334 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001335 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001336 bam_rx_pool_len,
1337 atomic_read(&bam_dmux_ack_out_cnt),
1338 atomic_read(&bam_dmux_ack_in_cnt),
1339 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001340 );
1341
1342 return i;
1343}
1344
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001345#define DEBUG_BUFMAX 4096
1346static char debug_buffer[DEBUG_BUFMAX];
1347
1348static ssize_t debug_read(struct file *file, char __user *buf,
1349 size_t count, loff_t *ppos)
1350{
1351 int (*fill)(char *buf, int max) = file->private_data;
1352 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1353 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1354}
1355
1356static int debug_open(struct inode *inode, struct file *file)
1357{
1358 file->private_data = inode->i_private;
1359 return 0;
1360}
1361
1362
1363static const struct file_operations debug_ops = {
1364 .read = debug_read,
1365 .open = debug_open,
1366};
1367
1368static void debug_create(const char *name, mode_t mode,
1369 struct dentry *dent,
1370 int (*fill)(char *buf, int max))
1371{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001372 struct dentry *file;
1373
1374 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1375 if (IS_ERR(file))
1376 pr_err("%s: debugfs create failed %d\n", __func__,
1377 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001378}
1379
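/*
 * Illustrative sketch (not part of the original driver): a new read-only
 * debugfs entry would follow the same fill-function pattern used above.
 * The fill callback writes at most "max" bytes into "buf" with scnprintf()
 * and returns the length written; debug_read() then copies that snapshot
 * to userspace.  The name debug_polling is hypothetical, it assumes
 * polling_mode is an int flag as its use above suggests, and it would be
 * registered with debug_create("polling", 0444, dent, debug_polling) in
 * bam_dmux_init().
 */
static int debug_polling(char *buf, int max)
{
	return scnprintf(buf, max, "polling mode: %d\n", polling_mode);
}
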
1380#endif
1381
Jeff Hugod98b1082011-10-24 10:30:23 -06001382static void notify_all(int event, unsigned long data)
1383{
1384 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001385 struct list_head *temp;
1386 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001387
1388 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001389 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001390 bam_ch[i].notify(bam_ch[i].priv, event, data);
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301391 BAM_DMUX_LOG("%s: cid=%d, event=%d, data=%lu\n",
Eric Holmberg454d9da2012-01-12 09:37:14 -07001392 __func__, i, event, data);
1393 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001394 }
Jeff Hugocb798022012-04-09 14:55:40 -06001395
1396 __list_for_each(temp, &bam_other_notify_funcs) {
1397 func = container_of(temp, struct outside_notify_func,
1398 list_node);
1399 func->notify(func->priv, event, data);
1400 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001401}
1402
1403static void kickoff_ul_wakeup_func(struct work_struct *work)
1404{
1405 read_lock(&ul_wakeup_lock);
1406 if (!bam_is_connected) {
1407 read_unlock(&ul_wakeup_lock);
1408 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001409 if (unlikely(in_global_reset == 1))
1410 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001411 read_lock(&ul_wakeup_lock);
1412 ul_packet_written = 1;
1413 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1414 }
1415 read_unlock(&ul_wakeup_lock);
1416}
1417
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001418int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001419{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001420 int is_connected;
1421
1422 read_lock(&ul_wakeup_lock);
1423 ul_packet_written = 1;
1424 is_connected = bam_is_connected;
1425 if (!is_connected)
1426 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1427 read_unlock(&ul_wakeup_lock);
1428
1429 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001430}
1431
Eric Holmberg878923a2012-01-10 14:28:19 -07001432static void power_vote(int vote)
1433{
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301434 BAM_DMUX_LOG("%s: curr=%d, vote=%d\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -07001435 bam_dmux_uplink_vote, vote);
1436
1437 if (bam_dmux_uplink_vote == vote)
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301438 BAM_DMUX_LOG("%s: warning - duplicate power vote\n", __func__);
Eric Holmberg878923a2012-01-10 14:28:19 -07001439
1440 bam_dmux_uplink_vote = vote;
1441 if (vote)
1442 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1443 else
1444 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1445}
1446
Eric Holmberg454d9da2012-01-12 09:37:14 -07001447/*
1448 * @note: Must be called with ul_wakeup_lock locked.
1449 */
1450static inline void ul_powerdown(void)
1451{
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301452 BAM_DMUX_LOG("%s: powerdown\n", __func__);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001453 verify_tx_queue_is_empty(__func__);
1454
1455 if (a2_pc_disabled) {
1456 wait_for_dfab = 1;
1457 INIT_COMPLETION(dfab_unvote_completion);
1458 release_wakelock();
1459 } else {
1460 wait_for_ack = 1;
1461 INIT_COMPLETION(ul_wakeup_ack_completion);
1462 power_vote(0);
1463 }
1464 bam_is_connected = 0;
1465 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1466}
1467
1468static inline void ul_powerdown_finish(void)
1469{
1470 if (a2_pc_disabled && wait_for_dfab) {
1471 unvote_dfab();
1472 complete_all(&dfab_unvote_completion);
1473 wait_for_dfab = 0;
1474 }
1475}
1476
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001477/*
1478 * Votes for UL power and returns current power state.
1479 *
1480 * @returns true if currently connected
1481 */
1482int msm_bam_dmux_ul_power_vote(void)
1483{
1484 int is_connected;
1485
1486 read_lock(&ul_wakeup_lock);
1487 atomic_inc(&ul_ondemand_vote);
1488 is_connected = bam_is_connected;
1489 if (!is_connected)
1490 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1491 read_unlock(&ul_wakeup_lock);
1492
1493 return is_connected;
1494}
1495
1496/*
1497 * Unvotes for UL power.
1498 *
1499 * @returns true if vote count is 0 (UL shutdown possible)
1500 */
1501int msm_bam_dmux_ul_power_unvote(void)
1502{
1503 int vote;
1504
1505 read_lock(&ul_wakeup_lock);
1506 vote = atomic_dec_return(&ul_ondemand_vote);
1507 if (unlikely(vote < 0))
1508 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1509 read_unlock(&ul_wakeup_lock);
1510
1511 return vote == 0;
1512}
1513
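/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * client pairing msm_bam_dmux_ul_power_vote() with
 * msm_bam_dmux_ul_power_unvote() around a transmit attempt.  If the vote
 * reports the uplink is not yet connected, the client defers the send
 * until it is notified with BAM_DMUX_UL_CONNECTED.  The example_* name is
 * an assumption used only for demonstration.
 */
static int example_client_try_tx(void)
{
	int ret = -EAGAIN;

	if (msm_bam_dmux_ul_power_vote()) {
		/* uplink powered and connected; a real client queues its data here */
		ret = 0;
	}
	/* always drop the on-demand vote so uplink power-down can proceed */
	msm_bam_dmux_ul_power_unvote();

	return ret;
}
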
Jeff Hugocb798022012-04-09 14:55:40 -06001514int msm_bam_dmux_reg_notify(void *priv,
1515 void (*notify)(void *priv, int event_type,
1516 unsigned long data))
1517{
1518 struct outside_notify_func *func;
1519
1520 if (!notify)
1521 return -EINVAL;
1522
1523 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1524 if (!func)
1525 return -ENOMEM;
1526
1527 func->notify = notify;
1528 func->priv = priv;
1529 list_add(&func->list_node, &bam_other_notify_funcs);
1530
1531 return 0;
1532}
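
/*
 * Illustrative sketch (not part of the original driver): registering an
 * outside notify function with msm_bam_dmux_reg_notify().  The callback
 * below only logs uplink connect/disconnect transitions and passes no
 * private data; the example_* names are hypothetical.
 */
static void example_power_notify(void *priv, int event_type,
				 unsigned long data)
{
	if (event_type == BAM_DMUX_UL_CONNECTED)
		pr_debug("%s: uplink connected\n", __func__);
	else if (event_type == BAM_DMUX_UL_DISCONNECTED)
		pr_debug("%s: uplink disconnected\n", __func__);
}

static int example_register_power_notify(void)
{
	return msm_bam_dmux_reg_notify(NULL, example_power_notify);
}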
1533
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001534static void ul_timeout(struct work_struct *work)
1535{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001536 unsigned long flags;
1537 int ret;
1538
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001539 if (in_global_reset)
1540 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001541 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1542 if (!ret) { /* failed to grab lock, reschedule and bail */
1543 schedule_delayed_work(&ul_timeout_work,
1544 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1545 return;
1546 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001547 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001548 if (!ul_packet_written) {
1549 spin_lock(&bam_tx_pool_spinlock);
1550 if (!list_empty(&bam_tx_pool)) {
1551 struct tx_pkt_info *info;
1552
1553 info = list_first_entry(&bam_tx_pool,
1554 struct tx_pkt_info, list_node);
1555 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1556 __func__, info->ts_sec, info->ts_nsec);
1557 DBG_INC_TX_STALL_CNT();
1558 ul_packet_written = 1;
1559 }
1560 spin_unlock(&bam_tx_pool_spinlock);
1561 }
1562
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001563 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301564 BAM_DMUX_LOG("%s: pkt written %d\n",
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001565 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001566 ul_packet_written = 0;
1567 schedule_delayed_work(&ul_timeout_work,
1568 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001569 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001570 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001571 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001572 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001573 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001574 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001575}
Jeff Hugo4838f412012-01-20 11:19:37 -07001576
1577static int ssrestart_check(void)
1578{
Eric Holmberg90285e22012-02-22 12:33:05 -07001579 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1580 in_global_reset = 1;
Eric Holmberg90285e22012-02-22 12:33:05 -07001581 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001582}
1583
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001584static void ul_wakeup(void)
1585{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001586 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001587 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001588
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001589 mutex_lock(&wakeup_lock);
1590 if (bam_is_connected) { /* bam got connected before lock grabbed */
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301591 BAM_DMUX_LOG("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001592 mutex_unlock(&wakeup_lock);
1593 return;
1594 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001595
Jeff Hugoc2696142012-05-03 11:42:13 -06001596 /*
Jeff Hugo00424ff2012-08-27 13:19:09 -06001597 * if this gets hit, that means restart_notifier_cb() has started
1598 * but probably not finished, thus we know SSR has happened, but
1599 * haven't been able to send that info to our clients yet.
1600 * in that case, abort the ul_wakeup() so that we don't undo any
1601 * work restart_notifier_cb() has done. The clients will be notified
1602 * shortly. No cleanup necessary (reschedule the wakeup) as our and
1603 * their SSR handling will cover it
1604 */
1605 if (unlikely(in_global_reset == 1)) {
1606 mutex_unlock(&wakeup_lock);
1607 return;
1608 }
1609
1610 /*
Jeff Hugoc2696142012-05-03 11:42:13 -06001611 * if someone is voting for UL before bam is inited (modem up first
1612 * time), set flag for init to kickoff ul wakeup once bam is inited
1613 */
1614 mutex_lock(&delayed_ul_vote_lock);
1615 if (unlikely(!bam_mux_initialized)) {
1616 need_delayed_ul_vote = 1;
1617 mutex_unlock(&delayed_ul_vote_lock);
1618 mutex_unlock(&wakeup_lock);
1619 return;
1620 }
1621 mutex_unlock(&delayed_ul_vote_lock);
1622
Eric Holmberg006057d2012-01-11 10:10:42 -07001623 if (a2_pc_disabled) {
1624 /*
1625 * don't grab the wakelock the first time because it is
1626 * already grabbed when a2 powers on
1627 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001628 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001629 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001630 do_vote_dfab = 1; /* vote must occur after wait */
1631 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001632 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001633 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001634 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001635 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001636 &dfab_unvote_completion, HZ);
1637 BUG_ON(ret == 0);
1638 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001639 if (likely(do_vote_dfab))
1640 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001641 schedule_delayed_work(&ul_timeout_work,
1642 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1643 bam_is_connected = 1;
1644 mutex_unlock(&wakeup_lock);
1645 return;
1646 }
1647
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001648 /*
1649 * must wait for the previous power down request to have been acked
1650 * chances are it already came in and this will just fall through
1651 * instead of waiting
1652 */
1653 if (wait_for_ack) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301654 BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001655 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001656 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001657 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001658 if (unlikely(ret == 0) && ssrestart_check()) {
1659 mutex_unlock(&wakeup_lock);
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301660 BAM_DMUX_LOG("%s timeout previous ack\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001661 return;
1662 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001663 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001664 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001665 power_vote(1);
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301666 BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001667 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001668 if (unlikely(ret == 0) && ssrestart_check()) {
1669 mutex_unlock(&wakeup_lock);
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301670 BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001671 return;
1672 }
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301673 BAM_DMUX_LOG("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001674 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001675 if (unlikely(ret == 0) && ssrestart_check()) {
1676 mutex_unlock(&wakeup_lock);
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301677 BAM_DMUX_LOG("%s timeout power on\n", __func__);
Jeff Hugo4838f412012-01-20 11:19:37 -07001678 return;
1679 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001680
1681 bam_is_connected = 1;
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301682 BAM_DMUX_LOG("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001683 schedule_delayed_work(&ul_timeout_work,
1684 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1685 mutex_unlock(&wakeup_lock);
1686}
1687
1688static void reconnect_to_bam(void)
1689{
1690 int i;
1691
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001692 in_global_reset = 0;
Jeff Hugo73f356f2012-12-14 17:56:19 -07001693 in_ssr = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001694 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001695 if (!power_management_only_mode) {
Jeff Hugo73f356f2012-12-14 17:56:19 -07001696 if (ssr_skipped_disconnect) {
1697 /* delayed to here to prevent bus stall */
1698 sps_disconnect(bam_tx_pipe);
1699 sps_disconnect(bam_rx_pipe);
1700 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1701 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1702 }
1703 ssr_skipped_disconnect = 0;
Jeff Hugo18792a32012-06-20 15:25:55 -06001704 i = sps_device_reset(a2_device_handle);
1705 if (i)
1706 pr_err("%s: device reset failed rc = %d\n", __func__,
1707 i);
1708 i = sps_connect(bam_tx_pipe, &tx_connection);
1709 if (i)
1710 pr_err("%s: tx connection failed rc = %d\n", __func__,
1711 i);
1712 i = sps_connect(bam_rx_pipe, &rx_connection);
1713 if (i)
1714 pr_err("%s: rx connection failed rc = %d\n", __func__,
1715 i);
1716 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1717 if (i)
1718 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1719 i);
1720 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1721 if (i)
1722 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1723 i);
1724 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001725
1726 bam_connection_is_active = 1;
1727
1728 if (polling_mode)
1729 rx_switch_to_interrupt_mode();
1730
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001731 toggle_apps_ack();
1732 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001733 if (!power_management_only_mode)
1734 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001735}
1736
1737static void disconnect_to_bam(void)
1738{
1739 struct list_head *node;
1740 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001741 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001742
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001743 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001744
1745 /* handle disconnect during active UL */
1746 write_lock_irqsave(&ul_wakeup_lock, flags);
1747 if (bam_is_connected) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301748 BAM_DMUX_LOG("%s: UL active - forcing powerdown\n", __func__);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001749 ul_powerdown();
1750 }
1751 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1752 ul_powerdown_finish();
1753
1754 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001755 INIT_COMPLETION(bam_connection_completion);
Jeff Hugo73f356f2012-12-14 17:56:19 -07001756
1757 /* in_ssr documentation/assumptions found in restart_notifier_cb */
1758 if (!power_management_only_mode) {
1759 if (likely(!in_ssr)) {
1760 sps_disconnect(bam_tx_pipe);
1761 sps_disconnect(bam_rx_pipe);
1762 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1763 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1764 sps_device_reset(a2_device_handle);
1765 } else {
1766 ssr_skipped_disconnect = 1;
1767 }
1768 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001769 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001770
1771 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001772 while (!list_empty(&bam_rx_pool)) {
1773 node = bam_rx_pool.next;
1774 list_del(node);
1775 info = container_of(node, struct rx_pkt_info, list_node);
1776 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1777 DMA_FROM_DEVICE);
1778 dev_kfree_skb_any(info->skb);
1779 kfree(info);
1780 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001781 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001782 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001783
Jeff Hugo0b13a352012-03-17 23:18:30 -06001784 if (disconnect_ack)
1785 toggle_apps_ack();
1786
Eric Holmberg878923a2012-01-10 14:28:19 -07001787 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001788}
1789
1790static void vote_dfab(void)
1791{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001792 int rc;
1793
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301794 BAM_DMUX_LOG("%s\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001795 mutex_lock(&dfab_status_lock);
1796 if (dfab_is_on) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301797 BAM_DMUX_LOG("%s: dfab is already on\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001798 mutex_unlock(&dfab_status_lock);
1799 return;
1800 }
Jeff Hugo0c9371a2012-08-09 15:32:49 -06001801 if (dfab_clk) {
1802 rc = clk_prepare_enable(dfab_clk);
1803 if (rc)
1804 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n",
1805 rc);
1806 }
1807 if (xo_clk) {
1808 rc = clk_prepare_enable(xo_clk);
1809 if (rc)
1810 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n",
1811 rc);
1812 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001813 dfab_is_on = 1;
1814 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001815}
1816
1817static void unvote_dfab(void)
1818{
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301819 BAM_DMUX_LOG("%s\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07001820 mutex_lock(&dfab_status_lock);
1821 if (!dfab_is_on) {
1822 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1823 dump_stack();
1824 mutex_unlock(&dfab_status_lock);
1825 return;
1826 }
Jeff Hugo0c9371a2012-08-09 15:32:49 -06001827 if (dfab_clk)
1828 clk_disable_unprepare(dfab_clk);
1829 if (xo_clk)
1830 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001831 dfab_is_on = 0;
1832 mutex_unlock(&dfab_status_lock);
1833}
1834
1835/* reference counting wrapper around wakelock */
1836static void grab_wakelock(void)
1837{
1838 unsigned long flags;
1839
1840 spin_lock_irqsave(&wakelock_reference_lock, flags);
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301841 BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
Eric Holmberg006057d2012-01-11 10:10:42 -07001842 wakelock_reference_count);
1843 if (wakelock_reference_count == 0)
1844 wake_lock(&bam_wakelock);
1845 ++wakelock_reference_count;
1846 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1847}
1848
1849static void release_wakelock(void)
1850{
1851 unsigned long flags;
1852
1853 spin_lock_irqsave(&wakelock_reference_lock, flags);
1854 if (wakelock_reference_count == 0) {
1855 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1856 dump_stack();
1857 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1858 return;
1859 }
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301860 BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
Eric Holmberg006057d2012-01-11 10:10:42 -07001861 wakelock_reference_count);
1862 --wakelock_reference_count;
1863 if (wakelock_reference_count == 0)
1864 wake_unlock(&bam_wakelock);
1865 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001866}
1867
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001868static int restart_notifier_cb(struct notifier_block *this,
1869 unsigned long code,
1870 void *data)
1871{
1872 int i;
1873 struct list_head *node;
1874 struct tx_pkt_info *info;
1875 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001876 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001877
Jeff Hugo73f356f2012-12-14 17:56:19 -07001878 /*
1879 * Bam_dmux counts on the fact that the BEFORE_SHUTDOWN level of
1880 * notifications is guaranteed to execute before the AFTER_SHUTDOWN
1881 * level of notifications, and that BEFORE_SHUTDOWN always occurs in
1882 * all SSR events, no matter what triggered the SSR. Also, bam_dmux
1883 * assumes that SMD does its SSR processing in the AFTER_SHUTDOWN level,
1884 * thus bam_dmux is guaranteed to detect SSR before SMD, since the
1885 * callbacks for all the drivers within the AFTER_SHUTDOWN level could
1886 * occur in any order. Bam_dmux uses this knowledge to skip accessing
1887 * the bam hardware when disconnect_to_bam() is triggered by SMD's SSR
1888 * processing. We do not want to access the bam hardware during SSR
1889 * because a watchdog crash from a bus stall would likely occur.
1890 */
Jeff Hugob6f72f12013-02-25 13:46:56 -07001891 if (code == SUBSYS_BEFORE_SHUTDOWN) {
1892 in_global_reset = 1;
Jeff Hugo73f356f2012-12-14 17:56:19 -07001893 in_ssr = 1;
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301894 BAM_DMUX_LOG("%s: begin\n", __func__);
Jeff Hugob6f72f12013-02-25 13:46:56 -07001895 flush_workqueue(bam_mux_rx_workqueue);
1896 }
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001897 if (code != SUBSYS_AFTER_SHUTDOWN)
1898 return NOTIFY_DONE;
1899
Eric Holmberg454d9da2012-01-12 09:37:14 -07001900 /* Handle uplink Powerdown */
1901 write_lock_irqsave(&ul_wakeup_lock, flags);
1902 if (bam_is_connected) {
1903 ul_powerdown();
1904 wait_for_ack = 0;
1905 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001906 /*
1907 * if modem crash during ul_wakeup(), power_vote is 1, needs to be
1908 * reset to 0. harmless if bam_is_connected check above passes
1909 */
1910 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001911 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1912 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001913 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001914 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugof62029d2012-07-17 13:39:53 -06001915 disconnect_ack = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001916
1917 /* Cleanup Channel States */
Eric Holmberga623da82012-07-12 09:37:09 -06001918 mutex_lock(&bam_pdev_mutexlock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001919 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1920 temp_remote_status = bam_ch_is_remote_open(i);
1921 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001922 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001923 if (bam_ch_is_local_open(i))
1924 bam_ch[i].status |= BAM_CH_IN_RESET;
1925 if (temp_remote_status) {
1926 platform_device_unregister(bam_ch[i].pdev);
1927 bam_ch[i].pdev = platform_device_alloc(
1928 bam_ch[i].name, 2);
1929 }
1930 }
Eric Holmberga623da82012-07-12 09:37:09 -06001931 mutex_unlock(&bam_pdev_mutexlock);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001932
1933 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07001934 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001935 while (!list_empty(&bam_tx_pool)) {
1936 node = bam_tx_pool.next;
1937 list_del(node);
1938 info = container_of(node, struct tx_pkt_info,
1939 list_node);
1940 if (!info->is_cmd) {
1941 dma_unmap_single(NULL, info->dma_address,
1942 info->skb->len,
1943 DMA_TO_DEVICE);
1944 dev_kfree_skb_any(info->skb);
1945 } else {
1946 dma_unmap_single(NULL, info->dma_address,
1947 info->len,
1948 DMA_TO_DEVICE);
1949 kfree(info->skb);
1950 }
1951 kfree(info);
1952 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001953 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001954
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05301955 BAM_DMUX_LOG("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001956 return NOTIFY_DONE;
1957}
1958
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001959static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001960{
1961 u32 h;
1962 dma_addr_t dma_addr;
1963 int ret;
1964 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001965 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001966
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001967 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 /* init BAM */
Jeff Hugo7bf02052012-08-21 14:08:20 -06001969 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
1970 a2_phys_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001971 if (!a2_virt_addr) {
1972 pr_err("%s: ioremap failed\n", __func__);
1973 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001974 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001975 }
Jeff Hugo7bf02052012-08-21 14:08:20 -06001976 a2_props.phys_addr = (u32)(a2_phys_base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo7bf02052012-08-21 14:08:20 -06001978 a2_props.virt_size = a2_phys_size;
1979 a2_props.irq = a2_bam_irq;
Jeff Hugo927cba62011-11-11 11:49:52 -07001980 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001981 a2_props.num_pipes = A2_NUM_PIPES;
1982 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo0682dad2012-10-22 11:34:28 -06001983 if (cpu_is_msm9615() || satellite_mode)
Jeff Hugo75913c82011-12-05 15:59:01 -07001984 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985 /* need to free on tear down */
1986 ret = sps_register_bam_device(&a2_props, &h);
1987 if (ret < 0) {
1988 pr_err("%s: register bam error %d\n", __func__, ret);
1989 goto register_bam_failed;
1990 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001991 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001992
1993 bam_tx_pipe = sps_alloc_endpoint();
1994 if (bam_tx_pipe == NULL) {
1995 pr_err("%s: tx alloc endpoint failed\n", __func__);
1996 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001997 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001998 }
1999 ret = sps_get_config(bam_tx_pipe, &tx_connection);
2000 if (ret) {
2001 pr_err("%s: tx get config failed %d\n", __func__, ret);
2002 goto tx_get_config_failed;
2003 }
2004
2005 tx_connection.source = SPS_DEV_HANDLE_MEM;
2006 tx_connection.src_pipe_index = 0;
2007 tx_connection.destination = h;
2008 tx_connection.dest_pipe_index = 4;
2009 tx_connection.mode = SPS_MODE_DEST;
2010 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
2011 tx_desc_mem_buf.size = 0x800; /* 2k */
2012 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
2013 &dma_addr, 0);
2014 if (tx_desc_mem_buf.base == NULL) {
2015 pr_err("%s: tx memory alloc failed\n", __func__);
2016 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002017 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002018 }
2019 tx_desc_mem_buf.phys_base = dma_addr;
2020 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
2021 tx_connection.desc = tx_desc_mem_buf;
2022 tx_connection.event_thresh = 0x10;
2023
2024 ret = sps_connect(bam_tx_pipe, &tx_connection);
2025 if (ret < 0) {
2026 pr_err("%s: tx connect error %d\n", __func__, ret);
2027 goto tx_connect_failed;
2028 }
2029
2030 bam_rx_pipe = sps_alloc_endpoint();
2031 if (bam_rx_pipe == NULL) {
2032 pr_err("%s: rx alloc endpoint failed\n", __func__);
2033 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002034 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035 }
2036 ret = sps_get_config(bam_rx_pipe, &rx_connection);
2037 if (ret) {
2038 pr_err("%s: rx get config failed %d\n", __func__, ret);
2039 goto rx_get_config_failed;
2040 }
2041
2042 rx_connection.source = h;
2043 rx_connection.src_pipe_index = 5;
2044 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2045 rx_connection.dest_pipe_index = 1;
2046 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002047 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2048 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002049 rx_desc_mem_buf.size = 0x800; /* 2k */
2050 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2051 &dma_addr, 0);
2052 if (rx_desc_mem_buf.base == NULL) {
2053 pr_err("%s: rx memory alloc failed\n", __func__);
2054 ret = -ENOMEM;
2055 goto rx_mem_failed;
2056 }
2057 rx_desc_mem_buf.phys_base = dma_addr;
2058 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2059 rx_connection.desc = rx_desc_mem_buf;
2060 rx_connection.event_thresh = 0x10;
2061
2062 ret = sps_connect(bam_rx_pipe, &rx_connection);
2063 if (ret < 0) {
2064 pr_err("%s: rx connect error %d\n", __func__, ret);
2065 goto rx_connect_failed;
2066 }
2067
2068 tx_register_event.options = SPS_O_EOT;
2069 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2070 tx_register_event.xfer_done = NULL;
2071 tx_register_event.callback = bam_mux_tx_notify;
2072 tx_register_event.user = NULL;
2073 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
2074 if (ret < 0) {
2075 pr_err("%s: tx register event error %d\n", __func__, ret);
2076 goto rx_event_reg_failed;
2077 }
2078
Jeff Hugo33dbc002011-08-25 15:52:53 -06002079 rx_register_event.options = SPS_O_EOT;
2080 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2081 rx_register_event.xfer_done = NULL;
2082 rx_register_event.callback = bam_mux_rx_notify;
2083 rx_register_event.user = NULL;
2084 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
2085 if (ret < 0) {
2086 pr_err("%s: rx register event error %d\n", __func__, ret);
2087 goto rx_event_reg_failed;
2088 }
2089
Jeff Hugoc2696142012-05-03 11:42:13 -06002090 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002092 if (need_delayed_ul_vote) {
2093 need_delayed_ul_vote = 0;
2094 msm_bam_dmux_kickoff_ul_wakeup();
2095 }
2096 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002097 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002098 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002099 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002100 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002101 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102
2103rx_event_reg_failed:
2104 sps_disconnect(bam_rx_pipe);
2105rx_connect_failed:
2106 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2107 rx_desc_mem_buf.phys_base);
2108rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002109rx_get_config_failed:
2110 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002111rx_alloc_endpoint_failed:
2112 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002113tx_connect_failed:
2114 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2115 tx_desc_mem_buf.phys_base);
2116tx_get_config_failed:
2117 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002118tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002119 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002120 /*
2121 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2122 * same handle below will cause a crash, so skip it if we've freed
2123 * the handle here.
2124 */
2125 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002126register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002127 if (!skip_iounmap)
2128 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002129ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002130 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002131 return ret;
2132}
2133
2134static int bam_init_fallback(void)
2135{
2136 u32 h;
2137 int ret;
2138 void *a2_virt_addr;
2139
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002140 /* init BAM */
Jeff Hugo7bf02052012-08-21 14:08:20 -06002141 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2142 a2_phys_size);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002143 if (!a2_virt_addr) {
2144 pr_err("%s: ioremap failed\n", __func__);
2145 ret = -ENOMEM;
2146 goto ioremap_failed;
2147 }
Jeff Hugo7bf02052012-08-21 14:08:20 -06002148 a2_props.phys_addr = (u32)(a2_phys_base);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002149 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo7bf02052012-08-21 14:08:20 -06002150 a2_props.virt_size = a2_phys_size;
2151 a2_props.irq = a2_bam_irq;
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002152 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2153 a2_props.num_pipes = A2_NUM_PIPES;
2154 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo0682dad2012-10-22 11:34:28 -06002155 if (cpu_is_msm9615() || satellite_mode)
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002156 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2157 ret = sps_register_bam_device(&a2_props, &h);
2158 if (ret < 0) {
2159 pr_err("%s: register bam error %d\n", __func__, ret);
2160 goto register_bam_failed;
2161 }
2162 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002163
2164 mutex_lock(&delayed_ul_vote_lock);
2165 bam_mux_initialized = 1;
2166 if (need_delayed_ul_vote) {
2167 need_delayed_ul_vote = 0;
2168 msm_bam_dmux_kickoff_ul_wakeup();
2169 }
2170 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002171 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002172
Jeff Hugo18792a32012-06-20 15:25:55 -06002173 power_management_only_mode = 1;
2174 bam_connection_is_active = 1;
2175 complete_all(&bam_connection_completion);
2176
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002177 return 0;
2178
2179register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002180 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002181ioremap_failed:
2182 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002183}
Jeff Hugoade1f842011-08-03 15:53:59 -06002184
Jeff Hugoa670b762012-03-15 15:58:28 -06002185static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002186{
2187 int ret = 0;
2188
2189 ret = bam_init();
2190 if (ret) {
2191 ret = bam_init_fallback();
2192 if (ret)
2193 pr_err("%s: bam init fallback failed: %d\n",
2194 __func__, ret);
2195 }
2196}
2197
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002198static void toggle_apps_ack(void)
2199{
2200 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002201
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302202 BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__,
Eric Holmberg878923a2012-01-10 14:28:19 -07002203 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002204 smsm_change_state(SMSM_APPS_STATE,
2205 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2206 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2207 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002208 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002209}
2210
Jeff Hugoade1f842011-08-03 15:53:59 -06002211static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2212{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002213 static int last_processed_state;
2214
2215 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002216 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002217 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302218 BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
Eric Holmberg878923a2012-01-10 14:28:19 -07002219 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002220 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302221 BAM_DMUX_LOG("%s: already processed this state\n", __func__);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002222 mutex_unlock(&smsm_cb_lock);
2223 return;
2224 }
2225
2226 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002227
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002228 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302229 BAM_DMUX_LOG("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002230 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002231 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002232 } else if (bam_mux_initialized &&
2233 !(new_state & SMSM_A2_POWER_CONTROL)) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302234 BAM_DMUX_LOG("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002235 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002236 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002237 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302238 BAM_DMUX_LOG("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002239 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002240 if (cpu_is_msm9615())
2241 msm9615_bam_init();
2242 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002243 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002244 } else {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302245 BAM_DMUX_LOG("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002246 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002247 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002248 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002249
2250}
2251
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002252static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2253 uint32_t new_state)
2254{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002255 DBG_INC_ACK_IN_CNT();
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302256 BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
Eric Holmberg878923a2012-01-10 14:28:19 -07002257 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002258 complete_all(&ul_wakeup_ack_completion);
2259}
2260
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002261static int bam_dmux_probe(struct platform_device *pdev)
2262{
2263 int rc;
Jeff Hugo7bf02052012-08-21 14:08:20 -06002264 struct resource *r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002265
2266 DBG("%s probe called\n", __func__);
2267 if (bam_mux_initialized)
2268 return 0;
2269
Jeff Hugo7bf02052012-08-21 14:08:20 -06002270 if (pdev->dev.of_node) {
2271 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2272 if (!r) {
2273 pr_err("%s: reg field missing\n", __func__);
2274 return -ENODEV;
2275 }
2276 a2_phys_base = (void *)(r->start);
2277 a2_phys_size = (uint32_t)(resource_size(r));
2278 a2_bam_irq = platform_get_irq(pdev, 0);
2279 if (a2_bam_irq == -ENXIO) {
2280 pr_err("%s: irq field missing\n", __func__);
2281 return -ENODEV;
2282 }
Jeff Hugo0682dad2012-10-22 11:34:28 -06002283 satellite_mode = of_property_read_bool(pdev->dev.of_node,
2284 "qcom,satellite-mode");
2285
Jeff Hugoc8058f82013-03-27 12:44:20 -06002286 rc = of_property_read_u32(pdev->dev.of_node,
2287 "qcom,rx-ring-size",
2288 &num_buffers);
2289 if (rc) {
2290 DBG("%s: falling back to num_buffs default, rc:%d\n",
2291 __func__, rc);
2292 num_buffers = DEFAULT_NUM_BUFFERS;
2293 }
2294
2295 DBG("%s: base:%p size:%x irq:%d satellite:%d num_buffs:%d\n",
2296 __func__,
Jeff Hugo7bf02052012-08-21 14:08:20 -06002297 a2_phys_base,
2298 a2_phys_size,
Jeff Hugo0682dad2012-10-22 11:34:28 -06002299 a2_bam_irq,
Jeff Hugoc8058f82013-03-27 12:44:20 -06002300 satellite_mode,
2301 num_buffers);
Jeff Hugo7bf02052012-08-21 14:08:20 -06002302 } else { /* fallback to default init data */
2303 a2_phys_base = (void *)(A2_PHYS_BASE);
2304 a2_phys_size = A2_PHYS_SIZE;
2305 a2_bam_irq = A2_BAM_IRQ;
Jeff Hugoc8058f82013-03-27 12:44:20 -06002306 num_buffers = DEFAULT_NUM_BUFFERS;
Jeff Hugo7bf02052012-08-21 14:08:20 -06002307 }
2308
Stephen Boyd69d35e32012-02-14 15:33:30 -08002309 xo_clk = clk_get(&pdev->dev, "xo");
2310 if (IS_ERR(xo_clk)) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302311 BAM_DMUX_LOG("%s: did not get xo clock\n", __func__);
Jeff Hugo0c9371a2012-08-09 15:32:49 -06002312 xo_clk = NULL;
Stephen Boyd69d35e32012-02-14 15:33:30 -08002313 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002314 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002315 if (IS_ERR(dfab_clk)) {
Zaheerulla Meerffb54ce2013-02-13 15:49:14 +05302316 BAM_DMUX_LOG("%s: did not get dfab clock\n", __func__);
Jeff Hugo0c9371a2012-08-09 15:32:49 -06002317 dfab_clk = NULL;
2318 } else {
2319 rc = clk_set_rate(dfab_clk, 64000000);
2320 if (rc)
2321 pr_err("%s: unable to set dfab clock rate\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002322 }
2323
Jeff Hugofff43af92012-03-29 17:54:52 -06002324 /*
2325 * setup the workqueue so that it can be pinned to core 0 and not
2326 * block the watchdog pet function, so that netif_rx() in rmnet
2327 * only uses one queue.
2328 */
2329 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2330 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002331 if (!bam_mux_rx_workqueue)
2332 return -ENOMEM;
2333
2334 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2335 if (!bam_mux_tx_workqueue) {
2336 destroy_workqueue(bam_mux_rx_workqueue);
2337 return -ENOMEM;
2338 }
2339
Jeff Hugo7960abd2011-08-02 15:39:38 -06002340 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002341 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002342 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2343 "bam_dmux_ch_%d", rc);
2344 /* bus 2, ie a2 stream 2 */
2345 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2346 if (!bam_ch[rc].pdev) {
2347 pr_err("%s: platform device alloc failed\n", __func__);
2348 destroy_workqueue(bam_mux_rx_workqueue);
2349 destroy_workqueue(bam_mux_tx_workqueue);
2350 return -ENOMEM;
2351 }
2352 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002353
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002354 init_completion(&ul_wakeup_ack_completion);
2355 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002356 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002357 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoad75d8d2012-10-03 15:53:54 -06002358 INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002359 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002360
Jeff Hugoade1f842011-08-03 15:53:59 -06002361 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2362 bam_dmux_smsm_cb, NULL);
2363
2364 if (rc) {
2365 destroy_workqueue(bam_mux_rx_workqueue);
2366 destroy_workqueue(bam_mux_tx_workqueue);
2367 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2368 return -ENOMEM;
2369 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002370
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002371 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2372 bam_dmux_smsm_ack_cb, NULL);
2373
2374 if (rc) {
2375 destroy_workqueue(bam_mux_rx_workqueue);
2376 destroy_workqueue(bam_mux_tx_workqueue);
2377 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2378 SMSM_A2_POWER_CONTROL,
2379 bam_dmux_smsm_cb, NULL);
2380 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2381 rc);
2382 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2383 platform_device_put(bam_ch[rc].pdev);
2384 return -ENOMEM;
2385 }
2386
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002387 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2388 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2389
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002390 return 0;
2391}
2392
Jeff Hugo7bf02052012-08-21 14:08:20 -06002393static struct of_device_id msm_match_table[] = {
2394 {.compatible = "qcom,bam_dmux"},
2395 {},
2396};
2397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002398static struct platform_driver bam_dmux_driver = {
2399 .probe = bam_dmux_probe,
2400 .driver = {
2401 .name = "BAM_RMNT",
2402 .owner = THIS_MODULE,
Jeff Hugo7bf02052012-08-21 14:08:20 -06002403 .of_match_table = msm_match_table,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002404 },
2405};
2406
2407static int __init bam_dmux_init(void)
2408{
2409#ifdef CONFIG_DEBUG_FS
2410 struct dentry *dent;
2411
2412 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002413 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002414 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002415 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2416 debug_create("stats", 0444, dent, debug_stats);
2417 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002418#endif
Zaheerulla Meeraa9fd5c2013-01-31 17:06:44 +05302419
2420 bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux");
2421 if (!bam_ipc_log_txt) {
2422 pr_err("%s: unable to create IPC Logging Context\n", __func__);
Eric Holmberg878923a2012-01-10 14:28:19 -07002423 }
2424
Anurag Singhdcd8b4e2012-07-30 16:46:37 -07002425 rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;
2426
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002427 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002428 return platform_driver_register(&bam_dmux_driver);
2429}
2430
Jeff Hugoade1f842011-08-03 15:53:59 -06002431late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002432MODULE_DESCRIPTION("MSM BAM DMUX");
2433MODULE_LICENSE("GPL v2");