/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define BAM_MUX_HDR_MAGIC_NO 0x33fc

#define BAM_MUX_HDR_CMD_DATA 0
#define BAM_MUX_HDR_CMD_OPEN 1
#define BAM_MUX_HDR_CMD_CLOSE 2
#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4

#define POLLING_MIN_SLEEP 950 /* 0.95 ms */
#define POLLING_MAX_SLEEP 1050 /* 1.05 ms */
#define POLLING_INACTIVITY 40 /* cycles before switch to intr mode */

#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {				\
		if (msm_bam_dmux_debug_enable)	\
			pr_debug(x);		\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				 \
		bam_dmux_write_cnt += (x);			 \
		if (msm_bam_dmux_debug_enable)			 \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	 \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					    \
		bam_dmux_write_cpy_bytes += (x);			    \
		bam_dmux_write_cpy_cnt++;				    \
		if (msm_bam_dmux_debug_enable)				    \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	    \
				 bam_dmux_write_cpy_bytes);		    \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_DEFAULT_DESCRIPTORS 32
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define BUFFER_SIZE 2048
#define NUM_BUFFERS 32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000 /* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * Complete size of message is limited to LOG_MESSAGE_MAX_SIZE.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	unsigned long flags;
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 * #: >=1 On-demand uplink vote
	 * D: 1 = Disconnect ACK active
	 */
	len += scnprintf(buff, sizeof(buff),
			"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
			(unsigned)t_now, nanosec_rem,
			a2_pc_disabled ? 'D' : 'd',
			in_global_reset ? 'R' : 'r',
			bam_dmux_power_state ? 'P' : 'p',
			bam_connection_is_active ? 'A' : 'a',
			bam_dmux_uplink_vote ? 'V' : 'v',
			bam_is_connected ? 'U' : 'u',
			wait_for_ack ? 'W' : 'w',
			ul_wakeup_ack_completion.done ? 'A' : 'a',
			atomic_read(&ul_ondemand_vote),
			disconnect_ack ? 'D' : 'd'
			);

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
		char junk[LOG_MESSAGE_MAX_SIZE];
		int ret;

		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
		if (ret != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: unable to empty log %d\n", __func__, ret);
			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
					flags);
			return;
		}
	}
	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

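/*
 * Replenish the RX buffer pool: allocate skbs, DMA-map them, and
 * submit them to the RX pipe until the pool holds NUM_BUFFERS
 * descriptors.  A failure that leaves the pool completely empty is
 * fatal to the connection, so it flags a global reset.
 */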
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
		if (!info) {
			pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
		if (info->skb == NULL) {
			DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
					BUFFER_SIZE, info,
					SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
					__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						DMA_FROM_DEVICE);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);

	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0) {
		DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
		in_global_reset = 1;
	}
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

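/*
 * Handle an OPEN command from the remote side: mark the channel
 * remote-open and register the channel's platform device so client
 * drivers can probe.  bam_pdev_mutexlock serializes the platform
 * device operations against subsystem-restart teardown.
 */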
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		bam_dmux_log("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

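/*
 * Demultiplex one received buffer.  Runs from the RX workqueue (or is
 * called directly from the polling loop), validates the mux header,
 * and dispatches on the command type: DATA is handed to the owning
 * channel, OPEN/CLOSE commands update channel state.
 */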
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			bam_dmux_log("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			bam_dmux_log("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

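/*
 * Queue a mux command (e.g. an OPEN or CLOSE header) on the TX pipe.
 * The command buffer is tracked in bam_tx_pool like any data packet;
 * bam_mux_write_done() frees it when the transfer completes.
 */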
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

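/*
 * TX completion handler, run from the TX workqueue.  Completions must
 * arrive in submission order; a mismatch with the head of bam_tx_pool
 * indicates pipe corruption and is fatal (BUG).  For data packets the
 * channel's in-flight count is decremented and the client is notified
 * with BAM_DMUX_WRITE_DONE.
 */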
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
					&errant_pkt->list_node,
					errant_pkt->ts_sec,
					errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

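/*
 * Write a client skb to a logical channel.  The skb gets a bam_mux_hdr
 * prepended and is padded to a 4-byte boundary (copied into a larger
 * skb if it lacks tailroom), then queued on the TX pipe.  May block in
 * ul_wakeup() if the uplink is powered down.  Returns -EAGAIN when the
 * channel uses watermarks and the high watermark has been reached.
 */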
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

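/*
 * Open a logical channel.  The remote side must have opened the
 * channel first (BAM_CH_REMOTE_OPEN); an OPEN command is then sent so
 * the remote side knows the local client is ready.
 */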
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

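/*
 * Close a logical channel and send a CLOSE command to the remote side.
 * If the channel is being torn down by subsystem restart, only the
 * local state is cleared and no command is sent.
 */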
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

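/*
 * Watermark queries used for TX flow control.  Calling either
 * watermark function enables watermark enforcement (use_wm) on the
 * channel, after which msm_bam_dmux_write() rejects packets past the
 * high watermark.
 */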
int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

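/*
 * Leave RX polling mode: re-arm the EOT interrupt, release the
 * wakelock taken when polling started, and drain any descriptors that
 * completed in the window before interrupts were re-enabled.  On
 * failure the pipe stays in polling mode and the poll work is
 * requeued.
 */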
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
				__func__, (void *)iov.addr);
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		if (info->dma_address != iov.addr) {
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)iov.addr,
				(void *)info->dma_address);
			list_for_each_entry(info, &bam_rx_pool, list_node) {
				DMUX_LOG_KERR("%s: dma %p\n", __func__,
					(void *)info->dma_address);
				if (iov.addr == info->dma_address)
					break;
			}
		}
		BUG_ON(info->dma_address != iov.addr);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

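/*
 * RX polling loop, pinned to core 0.  Repeatedly drains completed
 * descriptors, sleeping roughly 1 ms between passes, and falls back to
 * interrupt mode after POLLING_INACTIVITY empty cycles.
 */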
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				DMUX_LOG_KERR(
					"%s: have iovec %p but rx pool empty\n",
					__func__, (void *)iov.addr);
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			if (info->dma_address != iov.addr) {
				DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
					__func__,
					(void *)iov.addr,
					(void *)info->dma_address);
				list_for_each_entry(info, &bam_rx_pool,
						list_node) {
					DMUX_LOG_KERR("%s: dma %p\n", __func__,
						(void *)info->dma_address);
					if (iov.addr == info->dma_address)
						break;
				}
			}
			BUG_ON(info->dma_address != iov.addr);
			list_del(&info->list_node);
			--bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}

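/*
 * SPS TX event callback.  On EOT, unmap the finished buffer and punt
 * the rest of the completion handling to the TX workqueue.
 */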
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

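/*
 * SPS RX event callback.  On the first EOT, switch the pipe to polling
 * mode (interrupts off, wakelock held) and kick off the polling work
 * on core 0.
 */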
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			/*
			 * run on core 0 so that netif_rx() in rmnet uses only
			 * one queue
			 */
			queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001241#ifdef CONFIG_DEBUG_FS
1242
1243static int debug_tbl(char *buf, int max)
1244{
1245 int i = 0;
1246 int j;
1247
1248 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1249 i += scnprintf(buf + i, max - i,
1250 "ch%02d local open=%s remote open=%s\n",
1251 j, bam_ch_is_local_open(j) ? "Y" : "N",
1252 bam_ch_is_remote_open(j) ? "Y" : "N");
1253 }
1254
1255 return i;
1256}
1257
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001258static int debug_ul_pkt_cnt(char *buf, int max)
1259{
1260 struct list_head *p;
1261 unsigned long flags;
1262 int n = 0;
1263
1264 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1265 __list_for_each(p, &bam_tx_pool) {
1266 ++n;
1267 }
1268 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1269
1270 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1271}
1272
1273static int debug_stats(char *buf, int max)
1274{
1275 int i = 0;
1276
1277 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001278 "skb read cnt: %u\n"
1279 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001280 "skb copy cnt: %u\n"
1281 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001282 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001283 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001284 "rx queue len: %d\n"
1285 "a2 ack out cnt: %d\n"
1286 "a2 ack in cnt: %d\n"
1287 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001288 bam_dmux_read_cnt,
1289 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001290 bam_dmux_write_cpy_cnt,
1291 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001292 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001293 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001294 bam_rx_pool_len,
1295 atomic_read(&bam_dmux_ack_out_cnt),
1296 atomic_read(&bam_dmux_ack_in_cnt),
1297 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001298 );
1299
1300 return i;
1301}
1302
Eric Holmberg878923a2012-01-10 14:28:19 -07001303static int debug_log(char *buff, int max, loff_t *ppos)
1304{
1305 unsigned long flags;
1306 int i = 0;
1307
1308 if (bam_dmux_state_logging_disabled) {
1309 i += scnprintf(buff - i, max - i, "Logging disabled\n");
1310 return i;
1311 }
1312
1313 if (*ppos == 0) {
1314 i += scnprintf(buff - i, max - i,
1315 "<DMUX> timestamp FLAGS [Message]\n"
1316 "FLAGS:\n"
Eric Holmberg006057d2012-01-11 10:10:42 -07001317 "\tD: 1 = Power collapse disabled\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001318 "\tR: 1 = in global reset\n"
1319 "\tP: 1 = BAM is powered up\n"
1320 "\tA: 1 = BAM initialized and ready for data\n"
1321 "\n"
1322 "\tV: 1 = Uplink vote for power\n"
1323 "\tU: 1 = Uplink active\n"
1324 "\tW: 1 = Uplink Wait-for-ack\n"
1325 "\tA: 1 = Uplink ACK received\n"
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001326 "\t#: >=1 On-demand uplink vote\n"
Jeff Hugo0b13a352012-03-17 23:18:30 -06001327 "\tD: 1 = Disconnect ACK active\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001328 );
1329 buff += i;
1330 }
1331
1332 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
1333 while (kfifo_len(&bam_dmux_state_log)
1334 && (i + LOG_MESSAGE_MAX_SIZE) < max) {
1335 int k_len;
1336 k_len = kfifo_out(&bam_dmux_state_log,
1337 buff, LOG_MESSAGE_MAX_SIZE);
1338 if (k_len != LOG_MESSAGE_MAX_SIZE) {
1339 pr_err("%s: retrieve failure %d\n", __func__, k_len);
1340 break;
1341 }
1342
1343 /* keep non-null portion of string and add line break */
1344 k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
1345 buff += k_len;
1346 i += k_len;
1347 if (k_len && *(buff - 1) != '\n') {
1348 *buff++ = '\n';
1349 ++i;
1350 }
1351 }
1352 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
1353
1354 return i;
1355}
1356
#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static ssize_t debug_read_multiple(struct file *file, char __user *buff,
				size_t count, loff_t *ppos)
{
	int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
	char *buffer;
	int bsize;

	buffer = kmalloc(count, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	bsize = util_func(buffer, count, ppos);

	if (bsize >= 0) {
		if (copy_to_user(buff, buffer, bsize)) {
			kfree(buffer);
			return -EFAULT;
		}
		*ppos += bsize;
	}
	kfree(buffer);
	return bsize;
}

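/*
 * Contract between debug_read_multiple() and its fill callback: the
 * callback writes at most @max bytes into @buf, may consult *@ppos to
 * decide whether to emit a header (debug_log() above prints its flag
 * legend only on the first read, when *@ppos == 0), and returns the
 * number of bytes produced. debug_read_multiple() advances *@ppos
 * itself, so the callback must not.
 */
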
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static const struct file_operations debug_ops_multiple = {
	.read = debug_read_multiple,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
				struct dentry *dent,
				int (*fill)(char *buf, int max))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}

static void debug_create_multiple(const char *name, mode_t mode,
				struct dentry *dent,
				int (*fill)(char *buf, int max, loff_t *ppos))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}
#endif

static void notify_all(int event, unsigned long data)
{
	int i;
	struct list_head *temp;
	struct outside_notify_func *func;

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		if (bam_ch_is_open(i)) {
			bam_ch[i].notify(bam_ch[i].priv, event, data);
			bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
					__func__, i, event, data);
		}
	}

	__list_for_each(temp, &bam_other_notify_funcs) {
		func = container_of(temp, struct outside_notify_func,
				list_node);
		func->notify(func->priv, event, data);
	}
}

static void kickoff_ul_wakeup_func(struct work_struct *work)
{
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return;
		read_lock(&ul_wakeup_lock);
		ul_packet_written = 1;
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}
	read_unlock(&ul_wakeup_lock);
}

int msm_bam_dmux_kickoff_ul_wakeup(void)
{
	int is_connected;

	read_lock(&ul_wakeup_lock);
	ul_packet_written = 1;
	is_connected = bam_is_connected;
	if (!is_connected)
		queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
	read_unlock(&ul_wakeup_lock);

	return is_connected;
}

static void power_vote(int vote)
{
	bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
			bam_dmux_uplink_vote, vote);

	if (bam_dmux_uplink_vote == vote)
		bam_dmux_log("%s: warning - duplicate power vote\n", __func__);

	bam_dmux_uplink_vote = vote;
	if (vote)
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	else
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
}

/*
 * @note: Must be called with ul_wakeup_lock locked.
 */
static inline void ul_powerdown(void)
{
	bam_dmux_log("%s: powerdown\n", __func__);
	verify_tx_queue_is_empty(__func__);

	if (a2_pc_disabled) {
		wait_for_dfab = 1;
		INIT_COMPLETION(dfab_unvote_completion);
		release_wakelock();
	} else {
		wait_for_ack = 1;
		INIT_COMPLETION(ul_wakeup_ack_completion);
		power_vote(0);
	}
	bam_is_connected = 0;
	notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
}

static inline void ul_powerdown_finish(void)
{
	if (a2_pc_disabled && wait_for_dfab) {
		unvote_dfab();
		complete_all(&dfab_unvote_completion);
		wait_for_dfab = 0;
	}
}

/*
 * Votes for UL power and returns current power state.
 *
 * @returns true if currently connected
 */
int msm_bam_dmux_ul_power_vote(void)
{
	int is_connected;

	read_lock(&ul_wakeup_lock);
	atomic_inc(&ul_ondemand_vote);
	is_connected = bam_is_connected;
	if (!is_connected)
		queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
	read_unlock(&ul_wakeup_lock);

	return is_connected;
}

/*
 * Unvotes for UL power.
 *
 * @returns true if vote count is 0 (UL shutdown possible)
 */
int msm_bam_dmux_ul_power_unvote(void)
{
	int vote;

	read_lock(&ul_wakeup_lock);
	vote = atomic_dec_return(&ul_ondemand_vote);
	if (unlikely(vote < 0))
		DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
	read_unlock(&ul_wakeup_lock);

	return vote == 0;
}

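/*
 * Sketch of the intended vote/unvote pairing, from the point of view of
 * a hypothetical on-demand client (the client_* helper names here are
 * illustrative only, not part of this driver):
 *
 *	if (!msm_bam_dmux_ul_power_vote())
 *		client_wait_for_ul_connected();
 *	client_send_pending_packets();
 *	msm_bam_dmux_ul_power_unvote();
 *
 * Each vote must be balanced by exactly one unvote; a leaked vote keeps
 * atomic_read(&ul_ondemand_vote) nonzero in ul_timeout() and prevents
 * the inactivity powerdown.
 */
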
int msm_bam_dmux_reg_notify(void *priv,
			void (*notify)(void *priv, int event_type,
						unsigned long data))
{
	struct outside_notify_func *func;

	if (!notify)
		return -EINVAL;

	func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
	if (!func)
		return -ENOMEM;

	func->notify = notify;
	func->priv = priv;
	list_add(&func->list_node, &bam_other_notify_funcs);

	return 0;
}

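/*
 * Sketch of registering for power events via msm_bam_dmux_reg_notify().
 * The callback and its priv pointer are hypothetical; the events are
 * the same codes fanned out by notify_all():
 *
 *	static void client_power_cb(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			pr_debug("client: uplink up\n");
 *		else if (event == BAM_DMUX_UL_DISCONNECTED)
 *			pr_debug("client: uplink down\n");
 *	}
 *
 *	rc = msm_bam_dmux_reg_notify(client_priv, client_power_cb);
 *
 * There is no deregister counterpart here, so a registration lasts for
 * the life of the system.
 */
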
static void ul_timeout(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	if (in_global_reset)
		return;
	ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
	if (!ret) { /* failed to grab lock, reschedule and bail */
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		return;
	}
	if (bam_is_connected) {
		if (!ul_packet_written) {
			spin_lock(&bam_tx_pool_spinlock);
			if (!list_empty(&bam_tx_pool)) {
				struct tx_pkt_info *info;

				info = list_first_entry(&bam_tx_pool,
						struct tx_pkt_info, list_node);
				DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
					__func__, info->ts_sec, info->ts_nsec);
				DBG_INC_TX_STALL_CNT();
				ul_packet_written = 1;
			}
			spin_unlock(&bam_tx_pool_spinlock);
		}

		if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
			bam_dmux_log("%s: pkt written %d\n",
				__func__, ul_packet_written);
			ul_packet_written = 0;
			schedule_delayed_work(&ul_timeout_work,
					msecs_to_jiffies(UL_TIMEOUT_DELAY));
		} else {
			ul_powerdown();
		}
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
}

static int ssrestart_check(void)
{
	DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
	in_global_reset = 1;
	if (get_restart_level() <= RESET_SOC)
		DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
	return 1;
}

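/*
 * Wakes the uplink. The handshake with the A2, in order, is:
 *  1. vote for power over SMSM via power_vote(1),
 *  2. wait for the A2 to toggle SMSM_A2_POWER_CONTROL_ACK
 *     (ul_wakeup_ack_completion, signalled by bam_dmux_smsm_ack_cb()),
 *  3. wait for the BAM connection itself (bam_connection_completion).
 * Each wait is bounded by HZ; a timeout is treated as a modem failure
 * and handed to ssrestart_check().
 */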
static void ul_wakeup(void)
{
	int ret;
	int do_vote_dfab = 0;

	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		bam_dmux_log("%s Already awake\n", __func__);
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * if someone is voting for UL before bam is inited (modem up first
	 * time), set flag for init to kickoff ul wakeup once bam is inited
	 */
	mutex_lock(&delayed_ul_vote_lock);
	if (unlikely(!bam_mux_initialized)) {
		need_delayed_ul_vote = 1;
		mutex_unlock(&delayed_ul_vote_lock);
		mutex_unlock(&wakeup_lock);
		return;
	}
	mutex_unlock(&delayed_ul_vote_lock);

	if (a2_pc_disabled) {
		/*
		 * don't grab the wakelock the first time because it is
		 * already grabbed when a2 powers on
		 */
		if (likely(a2_pc_disabled_wakelock_skipped)) {
			grab_wakelock();
			do_vote_dfab = 1; /* vote must occur after wait */
		} else {
			a2_pc_disabled_wakelock_skipped = 1;
		}
		if (wait_for_dfab) {
			ret = wait_for_completion_timeout(
					&dfab_unvote_completion, HZ);
			BUG_ON(ret == 0);
		}
		if (likely(do_vote_dfab))
			vote_dfab();
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		bam_is_connected = 1;
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * must wait for the previous power down request to have been acked
	 * chances are it already came in and this will just fall through
	 * instead of waiting
	 */
	if (wait_for_ack) {
		bam_dmux_log("%s waiting for previous ack\n", __func__);
		ret = wait_for_completion_timeout(
					&ul_wakeup_ack_completion, HZ);
		wait_for_ack = 0;
		if (unlikely(ret == 0) && ssrestart_check()) {
			mutex_unlock(&wakeup_lock);
			bam_dmux_log("%s timeout previous ack\n", __func__);
			return;
		}
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	power_vote(1);
	bam_dmux_log("%s waiting for wakeup ack\n", __func__);
	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
	if (unlikely(ret == 0) && ssrestart_check()) {
		mutex_unlock(&wakeup_lock);
		bam_dmux_log("%s timeout wakeup ack\n", __func__);
		return;
	}
	bam_dmux_log("%s waiting completion\n", __func__);
	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
	if (unlikely(ret == 0) && ssrestart_check()) {
		mutex_unlock(&wakeup_lock);
		bam_dmux_log("%s timeout power on\n", __func__);
		return;
	}

	bam_is_connected = 1;
	bam_dmux_log("%s complete\n", __func__);
	schedule_delayed_work(&ul_timeout_work,
			msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}

static void reconnect_to_bam(void)
{
	int i;

	in_global_reset = 0;
	vote_dfab();
	if (!power_management_only_mode) {
		i = sps_device_reset(a2_device_handle);
		if (i)
			pr_err("%s: device reset failed rc = %d\n", __func__,
					i);
		i = sps_connect(bam_tx_pipe, &tx_connection);
		if (i)
			pr_err("%s: tx connection failed rc = %d\n", __func__,
					i);
		i = sps_connect(bam_rx_pipe, &rx_connection);
		if (i)
			pr_err("%s: rx connection failed rc = %d\n", __func__,
					i);
		i = sps_register_event(bam_tx_pipe, &tx_register_event);
		if (i)
			pr_err("%s: tx event reg failed rc = %d\n", __func__,
					i);
		i = sps_register_event(bam_rx_pipe, &rx_register_event);
		if (i)
			pr_err("%s: rx event reg failed rc = %d\n", __func__,
					i);
	}

	bam_connection_is_active = 1;

	if (polling_mode)
		rx_switch_to_interrupt_mode();

	toggle_apps_ack();
	complete_all(&bam_connection_completion);
	if (!power_management_only_mode)
		queue_rx();
}

static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;
	unsigned long flags;

	bam_connection_is_active = 0;

	/* handle disconnect during active UL */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
		ul_powerdown();
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();

	/* tear down BAM connection */
	INIT_COMPLETION(bam_connection_completion);
	if (!power_management_only_mode) {
		sps_disconnect(bam_tx_pipe);
		sps_disconnect(bam_rx_pipe);
		__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
		__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
	}
	unvote_dfab();

	mutex_lock(&bam_rx_pool_mutexlock);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
	bam_rx_pool_len = 0;
	mutex_unlock(&bam_rx_pool_mutexlock);

	if (disconnect_ack)
		toggle_apps_ack();

	verify_tx_queue_is_empty(__func__);
}

static void vote_dfab(void)
{
	int rc;

	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (dfab_is_on) {
		bam_dmux_log("%s: dfab is already on\n", __func__);
		mutex_unlock(&dfab_status_lock);
		return;
	}
	rc = clk_prepare_enable(dfab_clk);
	if (rc)
		DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
	rc = clk_prepare_enable(xo_clk);
	if (rc)
		DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc);
	dfab_is_on = 1;
	mutex_unlock(&dfab_status_lock);
}

static void unvote_dfab(void)
{
	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (!dfab_is_on) {
		DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
		dump_stack();
		mutex_unlock(&dfab_status_lock);
		return;
	}
	clk_disable_unprepare(dfab_clk);
	clk_disable_unprepare(xo_clk);
	dfab_is_on = 0;
	mutex_unlock(&dfab_status_lock);
}

/* reference counting wrapper around wakelock */
static void grab_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	bam_dmux_log("%s: ref count = %d\n", __func__,
			wakelock_reference_count);
	if (wakelock_reference_count == 0)
		wake_lock(&bam_wakelock);
	++wakelock_reference_count;
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

static void release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	if (wakelock_reference_count == 0) {
		DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
		dump_stack();
		spin_unlock_irqrestore(&wakelock_reference_lock, flags);
		return;
	}
	bam_dmux_log("%s: ref count = %d\n", __func__,
			wakelock_reference_count);
	--wakelock_reference_count;
	if (wakelock_reference_count == 0)
		wake_unlock(&bam_wakelock);
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

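/*
 * Pairing of the wrappers above: grab_wakelock() is called from
 * bam_dmux_smsm_cb() when the A2 votes for power and, in the
 * a2_pc_disabled case, from ul_wakeup(); release_wakelock() is the
 * matching call in the disconnect and ul_powerdown() paths. The
 * reference count lets those paths overlap without dropping the
 * wakelock early.
 */
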
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	int i;
	struct list_head *node;
	struct tx_pkt_info *info;
	int temp_remote_status;
	unsigned long flags;

	if (code != SUBSYS_AFTER_SHUTDOWN)
		return NOTIFY_DONE;

	bam_dmux_log("%s: begin\n", __func__);
	in_global_reset = 1;

	/* Handle uplink Powerdown */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		ul_powerdown();
		wait_for_ack = 0;
	}
	/*
	 * if modem crash during ul_wakeup(), power_vote is 1, needs to be
	 * reset to 0. harmless if bam_is_connected check above passes
	 */
	power_vote(0);
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
	a2_pc_disabled = 0;
	a2_pc_disabled_wakelock_skipped = 0;
	disconnect_ack = 0;

	/* Cleanup Channel States */
	mutex_lock(&bam_pdev_mutexlock);
	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		temp_remote_status = bam_ch_is_remote_open(i);
		bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
		bam_ch[i].num_tx_pkts = 0;
		if (bam_ch_is_local_open(i))
			bam_ch[i].status |= BAM_CH_IN_RESET;
		if (temp_remote_status) {
			platform_device_unregister(bam_ch[i].pdev);
			bam_ch[i].pdev = platform_device_alloc(
						bam_ch[i].name, 2);
		}
	}
	mutex_unlock(&bam_pdev_mutexlock);

	/* Cleanup pending UL data */
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	while (!list_empty(&bam_tx_pool)) {
		node = bam_tx_pool.next;
		list_del(node);
		info = container_of(node, struct tx_pkt_info,
							list_node);
		if (!info->is_cmd) {
			dma_unmap_single(NULL, info->dma_address,
						info->skb->len,
						DMA_TO_DEVICE);
			dev_kfree_skb_any(info->skb);
		} else {
			dma_unmap_single(NULL, info->dma_address,
						info->len,
						DMA_TO_DEVICE);
			kfree(info->skb);
		}
		kfree(info);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	bam_dmux_log("%s: complete\n", __func__);
	return NOTIFY_DONE;
}

static int bam_init(void)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int skip_iounmap = 0;

	vote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615())
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_alloc_endpoint_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_get_config_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto rx_alloc_endpoint_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	mutex_lock(&delayed_ul_vote_lock);
	bam_mux_initialized = 1;
	if (need_delayed_ul_vote) {
		need_delayed_ul_vote = 0;
		msm_bam_dmux_kickoff_ul_wakeup();
	}
	mutex_unlock(&delayed_ul_vote_lock);
	toggle_apps_ack();
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);
	queue_rx();
	return 0;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
rx_alloc_endpoint_failed:
	sps_disconnect(bam_tx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_alloc_endpoint_failed:
	sps_deregister_bam_device(h);
	/*
	 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
	 * same handle below will cause a crash, so skip it if we've freed
	 * the handle here.
	 */
	skip_iounmap = 1;
register_bam_failed:
	if (!skip_iounmap)
		iounmap(a2_virt_addr);
ioremap_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	return ret;
}

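/*
 * Reduced-function init used when full init is not possible (see
 * msm9615_bam_init() below): the BAM device is still registered so the
 * power-control handshake with the A2 keeps working, but no data pipes
 * are set up, and power_management_only_mode makes reconnect_to_bam()
 * and disconnect_to_bam() skip the data path.
 */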
static int bam_init_fallback(void)
{
	u32 h;
	int ret;
	void *a2_virt_addr;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615())
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	mutex_lock(&delayed_ul_vote_lock);
	bam_mux_initialized = 1;
	if (need_delayed_ul_vote) {
		need_delayed_ul_vote = 0;
		msm_bam_dmux_kickoff_ul_wakeup();
	}
	mutex_unlock(&delayed_ul_vote_lock);
	toggle_apps_ack();

	power_management_only_mode = 1;
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);

	return 0;

register_bam_failed:
	iounmap(a2_virt_addr);
ioremap_failed:
	return ret;
}

static void msm9615_bam_init(void)
{
	int ret = 0;

	ret = bam_init();
	if (ret) {
		ret = bam_init_fallback();
		if (ret)
			pr_err("%s: bam init fallback failed: %d",
					__func__, ret);
	}
}

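/*
 * Acknowledges one A2 power-control request. The ack is conveyed by
 * inverting SMSM_A2_POWER_CONTROL_ACK on every call, so it is the
 * transition, not the bit's value, that carries meaning; this is why
 * bam_dmux_smsm_cb() filters duplicate state callbacks before acking.
 */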
static void toggle_apps_ack(void)
{
	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */

	bam_dmux_log("%s: apps ack %d->%d\n", __func__,
			clear_bit & 0x1, ~clear_bit & 0x1);
	smsm_change_state(SMSM_APPS_STATE,
				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
	clear_bit = ~clear_bit;
	DBG_INC_ACK_OUT_CNT();
}

static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
{
	static int last_processed_state;

	mutex_lock(&smsm_cb_lock);
	bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
	DBG_INC_A2_POWER_CONTROL_IN_CNT();
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
		bam_dmux_log("%s: already processed this state\n", __func__);
		mutex_unlock(&smsm_cb_lock);
		return;
	}

	last_processed_state = new_state & SMSM_A2_POWER_CONTROL;

	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: reconnect\n", __func__);
		grab_wakelock();
		reconnect_to_bam();
	} else if (bam_mux_initialized &&
					!(new_state & SMSM_A2_POWER_CONTROL)) {
		bam_dmux_log("%s: disconnect\n", __func__);
		disconnect_to_bam();
		release_wakelock();
	} else if (new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: init\n", __func__);
		grab_wakelock();
		if (cpu_is_msm9615())
			msm9615_bam_init();
		else
			bam_init();
	} else {
		bam_dmux_log("%s: bad state change\n", __func__);
		pr_err("%s: unsupported state change\n", __func__);
	}
	mutex_unlock(&smsm_cb_lock);
}

static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
						uint32_t new_state)
{
	DBG_INC_ACK_IN_CNT();
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	complete_all(&ul_wakeup_ack_completion);
}

static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	xo_clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(xo_clk)) {
		pr_err("%s: did not get xo clock\n", __func__);
		return PTR_ERR(xo_clk);
	}
	dfab_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(dfab_clk)) {
		pr_err("%s: did not get dfab clock\n", __func__);
		return -EFAULT;
	}

	rc = clk_set_rate(dfab_clk, 64000000);
	if (rc)
		pr_err("%s: unable to set dfab clock rate\n", __func__);

	/*
	 * setup the workqueue so that it can be pinned to core 0 and not
	 * block the watchdog pet function, so that netif_rx() in rmnet
	 * only uses one queue.
	 */
	bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
		spin_lock_init(&bam_ch[rc].lock);
		scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
				"bam_dmux_ch_%d", rc);
		/* bus 2, ie a2 stream 2 */
		bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
		if (!bam_ch[rc].pdev) {
			pr_err("%s: platform device alloc failed\n", __func__);
			destroy_workqueue(bam_mux_rx_workqueue);
			destroy_workqueue(bam_mux_tx_workqueue);
			return -ENOMEM;
		}
	}

	init_completion(&ul_wakeup_ack_completion);
	init_completion(&bam_connection_completion);
	init_completion(&dfab_unvote_completion);
	INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		return -ENOMEM;
	}

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
					bam_dmux_smsm_ack_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		smsm_state_cb_deregister(SMSM_MODEM_STATE,
					SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);
		pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
				rc);
		for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
			platform_device_put(bam_ch[rc].pdev);
		return -ENOMEM;
	}

	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));

	return 0;
}

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init bam_dmux_init(void)
{
	int ret;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent)) {
		debug_create("tbl", 0444, dent, debug_tbl);
		debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
		debug_create("stats", 0444, dent, debug_stats);
		debug_create_multiple("log", 0444, dent, debug_log);
	}
#endif
	ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
	if (ret) {
		pr_err("%s: failed to allocate log %d\n", __func__, ret);
		bam_dmux_state_logging_disabled = 1;
	}

	subsys_notif_register_notifier("modem", &restart_notifier);
	return platform_driver_register(&bam_dmux_driver);
}

late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");