/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define BAM_MUX_HDR_MAGIC_NO 0x33fc

#define BAM_MUX_HDR_CMD_DATA 0
#define BAM_MUX_HDR_CMD_OPEN 1
#define BAM_MUX_HDR_CMD_CLOSE 2
#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4

#define POLLING_MIN_SLEEP 950 /* 0.95 ms */
#define POLLING_MAX_SLEEP 1050 /* 1.05 ms */
#define POLLING_INACTIVITY 40 /* cycles before switch to intr mode */

#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do { \
                if (msm_bam_dmux_debug_enable) \
                        pr_debug(x); \
        } while (0)

#define DBG_INC_READ_CNT(x) do { \
                bam_dmux_read_cnt += (x); \
                if (msm_bam_dmux_debug_enable) \
                        pr_debug("%s: total read bytes %u\n", \
                                 __func__, bam_dmux_read_cnt); \
        } while (0)

#define DBG_INC_WRITE_CNT(x) do { \
                bam_dmux_write_cnt += (x); \
                if (msm_bam_dmux_debug_enable) \
                        pr_debug("%s: total written bytes %u\n", \
                                 __func__, bam_dmux_write_cnt); \
        } while (0)

#define DBG_INC_WRITE_CPY(x) do { \
                bam_dmux_write_cpy_bytes += (x); \
                bam_dmux_write_cpy_cnt++; \
                if (msm_bam_dmux_debug_enable) \
                        pr_debug("%s: total write copy cnt %u, bytes %u\n", \
                                 __func__, bam_dmux_write_cpy_cnt, \
                                 bam_dmux_write_cpy_bytes); \
        } while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do { \
        bam_dmux_tx_sps_failure_cnt++; \
} while (0)

#define DBG_INC_TX_STALL_CNT() do { \
        bam_dmux_tx_stall_cnt++; \
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
        atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
        atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
        atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
        do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
        uint32_t status;
        void (*notify)(void *, int, unsigned long);
        void *priv;
        spinlock_t lock;
        struct platform_device *pdev;
        char name[BAM_DMUX_CH_NAME_MAX_LEN];
        int num_tx_pkts;
        int use_wm;
};

struct tx_pkt_info {
        struct sk_buff *skb;
        dma_addr_t dma_address;
        char is_cmd;
        uint32_t len;
        struct work_struct work;
        struct list_head list_node;
        unsigned ts_sec;
        unsigned long ts_nsec;
};

struct rx_pkt_info {
        struct sk_buff *skb;
        dma_addr_t dma_address;
        struct work_struct work;
        struct list_head list_node;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_DEFAULT_DESCRIPTORS 32
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define BUFFER_SIZE 2048
#define NUM_BUFFERS 32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

struct bam_mux_hdr {
        uint16_t magic_num;
        uint8_t reserved;
        uint8_t cmd;
        uint8_t pad_len;
        uint8_t ch_id;
        uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000 /* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;

struct outside_notify_func {
        void (*notify)(void *, int, unsigned long);
        void *priv;
        struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
                                unsigned long code,
                                void *data);

static struct notifier_block restart_notifier = {
        .notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
        (bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
        (bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
        (bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
        (bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void bam_dmux_log(const char *fmt, ...)
                                        __printf(1, 2);

#define DMUX_LOG_KERR(fmt...) \
do { \
        bam_dmux_log(fmt); \
        pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * Complete size of message is limited to LOG_MESSAGE_MAX_SIZE.
 */
static void bam_dmux_log(const char *fmt, ...)
{
        char buff[LOG_MESSAGE_MAX_SIZE];
        unsigned long flags;
        va_list arg_list;
        unsigned long long t_now;
        unsigned long nanosec_rem;
        int len = 0;

        if (bam_dmux_state_logging_disabled)
                return;

        t_now = sched_clock();
        nanosec_rem = do_div(t_now, 1000000000U);

        /*
         * States
         * D: 1 = Power collapse disabled
         * R: 1 = in global reset
         * P: 1 = BAM is powered up
         * A: 1 = BAM initialized and ready for data
         *
         * V: 1 = Uplink vote for power
         * U: 1 = Uplink active
         * W: 1 = Uplink Wait-for-ack
         * A: 1 = Uplink ACK received
         * #: >=1 On-demand uplink vote
         * D: 1 = Disconnect ACK active
         */
        len += scnprintf(buff, sizeof(buff),
                "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
                (unsigned)t_now, nanosec_rem,
                a2_pc_disabled ? 'D' : 'd',
                in_global_reset ? 'R' : 'r',
                bam_dmux_power_state ? 'P' : 'p',
                bam_connection_is_active ? 'A' : 'a',
                bam_dmux_uplink_vote ? 'V' : 'v',
                bam_is_connected ? 'U' : 'u',
                wait_for_ack ? 'W' : 'w',
                ul_wakeup_ack_completion.done ? 'A' : 'a',
                atomic_read(&ul_ondemand_vote),
                disconnect_ack ? 'D' : 'd'
                );

        va_start(arg_list, fmt);
        len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
        va_end(arg_list);
        memset(buff + len, 0x0, sizeof(buff) - len);

        spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
        if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
                char junk[LOG_MESSAGE_MAX_SIZE];
                int ret;

                ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
                if (ret != LOG_MESSAGE_MAX_SIZE) {
                        pr_err("%s: unable to empty log %d\n", __func__, ret);
                        spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
                                        flags);
                        return;
                }
        }
        kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
        spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
        unsigned long long t_now;

        t_now = sched_clock();
        pkt->ts_nsec = do_div(t_now, 1000000000U);
        pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
        unsigned long flags;
        struct tx_pkt_info *info;
        int reported = 0;

        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        list_for_each_entry(info, &bam_tx_pool, list_node) {
                if (!reported) {
                        bam_dmux_log("%s: tx pool not empty\n", func);
                        if (!in_global_reset)
                                pr_err("%s: tx pool not empty\n", func);
                        reported = 1;
                }
                bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
                        &info->list_node, info->ts_sec, info->ts_nsec);
                if (!in_global_reset)
                        pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
                                &info->list_node, info->ts_sec, info->ts_nsec);
        }
        spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

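/*
 * queue_rx() - top up the RX buffer pool
 *
 * Allocates skbs until the pool holds NUM_BUFFERS entries, DMA-maps each one
 * and hands it to the BAM RX pipe with sps_transfer_one().  If the pool ever
 * drains to zero and a refill fails, the connection is treated as dead and
 * in_global_reset is set.
 */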
static void queue_rx(void)
{
        void *ptr;
        struct rx_pkt_info *info;
        int ret;
        int rx_len_cached;

        mutex_lock(&bam_rx_pool_mutexlock);
        rx_len_cached = bam_rx_pool_len;
        mutex_unlock(&bam_rx_pool_mutexlock);

        while (rx_len_cached < NUM_BUFFERS) {
                if (in_global_reset)
                        goto fail;

                info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
                if (!info) {
                        pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
                        goto fail;
                }

                INIT_WORK(&info->work, handle_bam_mux_cmd);

                info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
                if (info->skb == NULL) {
                        DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
                        goto fail_info;
                }
                ptr = skb_put(info->skb, BUFFER_SIZE);

                info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
                                                        DMA_FROM_DEVICE);
                if (info->dma_address == 0 || info->dma_address == ~0) {
                        DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
                                __func__, (void *)info->dma_address, ptr);
                        goto fail_skb;
                }

                mutex_lock(&bam_rx_pool_mutexlock);
                list_add_tail(&info->list_node, &bam_rx_pool);
                rx_len_cached = ++bam_rx_pool_len;
                ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
                                BUFFER_SIZE, info,
                                SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
                if (ret) {
                        list_del(&info->list_node);
                        rx_len_cached = --bam_rx_pool_len;
                        mutex_unlock(&bam_rx_pool_mutexlock);
                        DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
                                __func__, ret);

                        dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
                                                DMA_FROM_DEVICE);

                        goto fail_skb;
                }
                mutex_unlock(&bam_rx_pool_mutexlock);
        }
        return;

fail_skb:
        dev_kfree_skb_any(info->skb);

fail_info:
        kfree(info);

fail:
        if (rx_len_cached == 0) {
                DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
                in_global_reset = 1;
        }
}

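/*
 * bam_mux_process_data() - deliver one received data packet
 *
 * Strips the mux header, fixes up the skb accounting, and passes the skb to
 * the channel's notify() callback (or frees it if no callback is registered),
 * then queues a replacement RX buffer.
 */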
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
        unsigned long flags;
        struct bam_mux_hdr *rx_hdr;
        unsigned long event_data;

        rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

        rx_skb->data = (unsigned char *)(rx_hdr + 1);
        rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
        rx_skb->len = rx_hdr->pkt_len;
        rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

        event_data = (unsigned long)(rx_skb);

        spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
        if (bam_ch[rx_hdr->ch_id].notify)
                bam_ch[rx_hdr->ch_id].notify(
                        bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
                                                        event_data);
        else
                dev_kfree_skb_any(rx_skb);
        spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

        queue_rx();
}

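/*
 * handle_bam_mux_cmd_open() - process a remote OPEN command
 *
 * Marks the channel remote-open and registers the channel's platform device
 * so clients can probe.  bam_pdev_mutexlock serializes this against the
 * CLOSE handler and subsystem restart.
 */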
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
        unsigned long flags;
        int ret;

        mutex_lock(&bam_pdev_mutexlock);
        if (in_global_reset) {
                bam_dmux_log("%s: open cid %d aborted due to ssr\n",
                                __func__, rx_hdr->ch_id);
                mutex_unlock(&bam_pdev_mutexlock);
                queue_rx();
                return;
        }
        spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
        bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
        bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
        spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
        ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
        if (ret)
                pr_err("%s: platform_device_add() error: %d\n",
                                __func__, ret);
        mutex_unlock(&bam_pdev_mutexlock);
        queue_rx();
}

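/*
 * handle_bam_mux_cmd() - RX work function for one received buffer
 *
 * Unmaps the buffer, validates the mux header (magic number and LCID), and
 * dispatches on the command type: DATA, OPEN, OPEN_NO_A2_PC or CLOSE.
 * Invalid packets are logged and dropped.
 */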
static void handle_bam_mux_cmd(struct work_struct *work)
{
        unsigned long flags;
        struct bam_mux_hdr *rx_hdr;
        struct rx_pkt_info *info;
        struct sk_buff *rx_skb;

        info = container_of(work, struct rx_pkt_info, work);
        rx_skb = info->skb;
        dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
        kfree(info);

        rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

        DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
        DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
                        rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
                        rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
        if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
                DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
                        " reserved %d cmd %d"
                        " pad %d ch %d len %d\n", __func__,
                        rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
                        rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
                dev_kfree_skb_any(rx_skb);
                queue_rx();
                return;
        }

        if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
                DMUX_LOG_KERR("%s: dropping invalid LCID %d"
                        " reserved %d cmd %d"
                        " pad %d ch %d len %d\n", __func__,
                        rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
                        rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
                dev_kfree_skb_any(rx_skb);
                queue_rx();
                return;
        }

        switch (rx_hdr->cmd) {
        case BAM_MUX_HDR_CMD_DATA:
                DBG_INC_READ_CNT(rx_hdr->pkt_len);
                bam_mux_process_data(rx_skb);
                break;
        case BAM_MUX_HDR_CMD_OPEN:
                bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
                                rx_hdr->ch_id);
                handle_bam_mux_cmd_open(rx_hdr);
                if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
                        bam_dmux_log("%s: deactivating disconnect ack\n",
                                                                __func__);
                        disconnect_ack = 0;
                }
                dev_kfree_skb_any(rx_skb);
                break;
        case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
                bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
                                rx_hdr->ch_id);

                if (!a2_pc_disabled) {
                        a2_pc_disabled = 1;
                        ul_wakeup();
                }

                handle_bam_mux_cmd_open(rx_hdr);
                dev_kfree_skb_any(rx_skb);
                break;
        case BAM_MUX_HDR_CMD_CLOSE:
                /* probably should drop pending write */
                bam_dmux_log("%s: closing cid %d\n", __func__,
                                rx_hdr->ch_id);
                mutex_lock(&bam_pdev_mutexlock);
                if (in_global_reset) {
                        bam_dmux_log("%s: close cid %d aborted due to ssr\n",
                                        __func__, rx_hdr->ch_id);
                        mutex_unlock(&bam_pdev_mutexlock);
                        break;
                }
                spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
                bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
                spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
                platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
                bam_ch[rx_hdr->ch_id].pdev =
                        platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
                if (!bam_ch[rx_hdr->ch_id].pdev)
                        pr_err("%s: platform_device_alloc failed\n", __func__);
                mutex_unlock(&bam_pdev_mutexlock);
                dev_kfree_skb_any(rx_skb);
                queue_rx();
                break;
        default:
                DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
                        " reserved %d cmd %d pad %d ch %d len %d\n",
                        __func__, rx_hdr->magic_num, rx_hdr->reserved,
                        rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
                        rx_hdr->pkt_len);
                dev_kfree_skb_any(rx_skb);
                queue_rx();
                return;
        }
}

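/*
 * bam_mux_write_cmd() - send a mux command (e.g. an OPEN/CLOSE header)
 *
 * DMA-maps the command buffer, adds it to the TX pool, and submits it to the
 * BAM TX pipe.  On submission failure the packet is unwound and freed.
 */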
static int bam_mux_write_cmd(void *data, uint32_t len)
{
        int rc;
        struct tx_pkt_info *pkt;
        dma_addr_t dma_address;
        unsigned long flags;

        pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
        if (pkt == NULL) {
                pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
                rc = -ENOMEM;
                return rc;
        }

        dma_address = dma_map_single(NULL, data, len,
                                        DMA_TO_DEVICE);
        if (!dma_address) {
                pr_err("%s: dma_map_single() failed\n", __func__);
                kfree(pkt);
                rc = -ENOMEM;
                return rc;
        }
        pkt->skb = (struct sk_buff *)(data);
        pkt->len = len;
        pkt->dma_address = dma_address;
        pkt->is_cmd = 1;
        set_tx_timestamp(pkt);
        INIT_WORK(&pkt->work, bam_mux_write_done);
        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        list_add_tail(&pkt->list_node, &bam_tx_pool);
        rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
                                pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
        if (rc) {
                DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
                        __func__, rc);
                list_del(&pkt->list_node);
                DBG_INC_TX_SPS_FAILURE_CNT();
                spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
                dma_unmap_single(NULL, pkt->dma_address,
                                        pkt->len,
                                        DMA_TO_DEVICE);
                kfree(pkt);
        } else {
                spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
        }

        ul_packet_written = 1;
        return rc;
}

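/*
 * bam_mux_write_done() - TX completion work function
 *
 * Completions must arrive in submission order, so a mismatch against the
 * head of bam_tx_pool indicates corruption and is fatal (BUG).  For data
 * packets the channel's notify() callback receives BAM_DMUX_WRITE_DONE.
 */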
static void bam_mux_write_done(struct work_struct *work)
{
        struct sk_buff *skb;
        struct bam_mux_hdr *hdr;
        struct tx_pkt_info *info;
        struct tx_pkt_info *info_expected;
        unsigned long event_data;
        unsigned long flags;

        if (in_global_reset)
                return;

        info = container_of(work, struct tx_pkt_info, work);

        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        info_expected = list_first_entry(&bam_tx_pool,
                        struct tx_pkt_info, list_node);
        if (unlikely(info != info_expected)) {
                struct tx_pkt_info *errant_pkt;

                DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
                                " list_node=%p, ts=%u.%09lu\n",
                                __func__, bam_tx_pool.next, &info->list_node,
                                info->ts_sec, info->ts_nsec
                                );

                list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
                        DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
                                        &errant_pkt->list_node,
                                        errant_pkt->ts_sec,
                                        errant_pkt->ts_nsec);
                }
                spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
                BUG();
        }
        list_del(&info->list_node);
        spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

        if (info->is_cmd) {
                kfree(info->skb);
                kfree(info);
                return;
        }
        skb = info->skb;
        kfree(info);
        hdr = (struct bam_mux_hdr *)skb->data;
        DBG_INC_WRITE_CNT(skb->len);
        event_data = (unsigned long)(skb);
        spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
        bam_ch[hdr->ch_id].num_tx_pkts--;
        spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
        if (bam_ch[hdr->ch_id].notify)
                bam_ch[hdr->ch_id].notify(
                        bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
                                                        event_data);
        else
                dev_kfree_skb_any(skb);
}

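/*
 * msm_bam_dmux_write() - queue an skb for transmission on a mux channel
 *
 * Enforces the per-channel high watermark, wakes the uplink if needed,
 * prepends a bam_mux_hdr (padding the payload to a 4-byte multiple), and
 * submits the packet to the BAM TX pipe.
 */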
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
        int rc = 0;
        struct bam_mux_hdr *hdr;
        unsigned long flags;
        struct sk_buff *new_skb = NULL;
        dma_addr_t dma_address;
        struct tx_pkt_info *pkt;

        if (id >= BAM_DMUX_NUM_CHANNELS)
                return -EINVAL;
        if (!skb)
                return -EINVAL;
        if (!bam_mux_initialized)
                return -ENODEV;

        DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
        spin_lock_irqsave(&bam_ch[id].lock, flags);
        if (!bam_ch_is_open(id)) {
                spin_unlock_irqrestore(&bam_ch[id].lock, flags);
                pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
                return -ENODEV;
        }

        if (bam_ch[id].use_wm &&
            (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
                spin_unlock_irqrestore(&bam_ch[id].lock, flags);
                pr_err("%s: watermark exceeded: %d\n", __func__, id);
                return -EAGAIN;
        }
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);

        read_lock(&ul_wakeup_lock);
        if (!bam_is_connected) {
                read_unlock(&ul_wakeup_lock);
                ul_wakeup();
                if (unlikely(in_global_reset == 1))
                        return -EFAULT;
                read_lock(&ul_wakeup_lock);
                notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
        }

        /* if the skb does not have any tailroom for padding,
           copy the skb into a new expanded skb */
        if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
                /* revisit: dev_alloc_skb and memcpy would probably be more
                   efficient */
                new_skb = skb_copy_expand(skb, skb_headroom(skb),
                                          4 - (skb->len & 0x3), GFP_ATOMIC);
                if (new_skb == NULL) {
                        pr_err("%s: cannot allocate skb\n", __func__);
                        goto write_fail;
                }
                dev_kfree_skb_any(skb);
                skb = new_skb;
                DBG_INC_WRITE_CPY(skb->len);
        }

        hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

        /* caller should allocate for hdr and padding;
           hdr is fine, padding is tricky */
        hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
        hdr->cmd = BAM_MUX_HDR_CMD_DATA;
        hdr->reserved = 0;
        hdr->ch_id = id;
        hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
        if (skb->len & 0x3)
                skb_put(skb, 4 - (skb->len & 0x3));

        hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

        DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
            __func__, skb->data, skb->tail, skb->len,
            hdr->pkt_len, hdr->pad_len);

        pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
        if (pkt == NULL) {
                pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
                goto write_fail2;
        }

        dma_address = dma_map_single(NULL, skb->data, skb->len,
                                        DMA_TO_DEVICE);
        if (!dma_address) {
                pr_err("%s: dma_map_single() failed\n", __func__);
                goto write_fail3;
        }
        pkt->skb = skb;
        pkt->dma_address = dma_address;
        pkt->is_cmd = 0;
        set_tx_timestamp(pkt);
        INIT_WORK(&pkt->work, bam_mux_write_done);
        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        list_add_tail(&pkt->list_node, &bam_tx_pool);
        rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
                                pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
        if (rc) {
                DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
                        __func__, rc);
                list_del(&pkt->list_node);
                DBG_INC_TX_SPS_FAILURE_CNT();
                spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
                dma_unmap_single(NULL, pkt->dma_address,
                                        pkt->skb->len, DMA_TO_DEVICE);
                kfree(pkt);
                if (new_skb)
                        dev_kfree_skb_any(new_skb);
        } else {
                spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
                spin_lock_irqsave(&bam_ch[id].lock, flags);
                bam_ch[id].num_tx_pkts++;
                spin_unlock_irqrestore(&bam_ch[id].lock, flags);
        }
        ul_packet_written = 1;
        read_unlock(&ul_wakeup_lock);
        return rc;

write_fail3:
        kfree(pkt);
write_fail2:
        if (new_skb)
                dev_kfree_skb_any(new_skb);
write_fail:
        read_unlock(&ul_wakeup_lock);
        return -ENOMEM;
}

int msm_bam_dmux_open(uint32_t id, void *priv,
                        void (*notify)(void *, int, unsigned long))
{
        struct bam_mux_hdr *hdr;
        unsigned long flags;
        int rc = 0;

        DBG("%s: opening ch %d\n", __func__, id);
        if (!bam_mux_initialized) {
                DBG("%s: not initialized\n", __func__);
                return -ENODEV;
        }
        if (id >= BAM_DMUX_NUM_CHANNELS) {
                pr_err("%s: invalid channel id %d\n", __func__, id);
                return -EINVAL;
        }
        if (notify == NULL) {
                pr_err("%s: notify function is NULL\n", __func__);
                return -EINVAL;
        }

        hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
        if (hdr == NULL) {
                pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
                return -ENOMEM;
        }
        spin_lock_irqsave(&bam_ch[id].lock, flags);
        if (bam_ch_is_open(id)) {
                DBG("%s: Already opened %d\n", __func__, id);
                spin_unlock_irqrestore(&bam_ch[id].lock, flags);
                kfree(hdr);
                goto open_done;
        }
        if (!bam_ch_is_remote_open(id)) {
                DBG("%s: Remote not open; ch: %d\n", __func__, id);
                spin_unlock_irqrestore(&bam_ch[id].lock, flags);
                kfree(hdr);
                return -ENODEV;
        }

        bam_ch[id].notify = notify;
        bam_ch[id].priv = priv;
        bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
        bam_ch[id].num_tx_pkts = 0;
        bam_ch[id].use_wm = 0;
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);

        read_lock(&ul_wakeup_lock);
        if (!bam_is_connected) {
                read_unlock(&ul_wakeup_lock);
                ul_wakeup();
                if (unlikely(in_global_reset == 1))
                        return -EFAULT;
                read_lock(&ul_wakeup_lock);
                notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
        }

        hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
        hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
        hdr->reserved = 0;
        hdr->ch_id = id;
        hdr->pkt_len = 0;
        hdr->pad_len = 0;

        rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
        read_unlock(&ul_wakeup_lock);

open_done:
        DBG("%s: opened ch %d\n", __func__, id);
        return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
        struct bam_mux_hdr *hdr;
        unsigned long flags;
        int rc;

        if (id >= BAM_DMUX_NUM_CHANNELS)
                return -EINVAL;
        DBG("%s: closing ch %d\n", __func__, id);
        if (!bam_mux_initialized)
                return -ENODEV;

        read_lock(&ul_wakeup_lock);
        if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
                read_unlock(&ul_wakeup_lock);
                ul_wakeup();
                if (unlikely(in_global_reset == 1))
                        return -EFAULT;
                read_lock(&ul_wakeup_lock);
                notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
        }

        spin_lock_irqsave(&bam_ch[id].lock, flags);
        bam_ch[id].notify = NULL;
        bam_ch[id].priv = NULL;
        bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);

        if (bam_ch_is_in_reset(id)) {
                read_unlock(&ul_wakeup_lock);
                bam_ch[id].status &= ~BAM_CH_IN_RESET;
                return 0;
        }

        hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
        if (hdr == NULL) {
                pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
                read_unlock(&ul_wakeup_lock);
                return -ENOMEM;
        }
        hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
        hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
        hdr->reserved = 0;
        hdr->ch_id = id;
        hdr->pkt_len = 0;
        hdr->pad_len = 0;

        rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
        read_unlock(&ul_wakeup_lock);

        DBG("%s: closed ch %d\n", __func__, id);
        return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
        unsigned long flags;
        int ret;

        if (id >= BAM_DMUX_NUM_CHANNELS)
                return -EINVAL;

        spin_lock_irqsave(&bam_ch[id].lock, flags);
        bam_ch[id].use_wm = 1;
        ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
        DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
                id, bam_ch[id].num_tx_pkts, ret);
        if (!bam_ch_is_local_open(id)) {
                ret = -ENODEV;
                pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
        }
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);

        return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
        unsigned long flags;
        int ret;

        if (id >= BAM_DMUX_NUM_CHANNELS)
                return -EINVAL;

        spin_lock_irqsave(&bam_ch[id].lock, flags);
        bam_ch[id].use_wm = 1;
        ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
        DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
                id, bam_ch[id].num_tx_pkts, ret);
        if (!bam_ch_is_local_open(id)) {
                ret = -ENODEV;
                pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
        }
        spin_unlock_irqrestore(&bam_ch[id].lock, flags);

        return ret;
}

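/*
 * rx_switch_to_interrupt_mode() - leave RX polling mode
 *
 * Re-arms the EOT interrupt on the RX pipe, releases the polling wakelock,
 * and drains any descriptors that completed before interrupts were enabled.
 * On failure it falls back to re-queueing the polling work.
 */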
static void rx_switch_to_interrupt_mode(void)
{
        struct sps_connect cur_rx_conn;
        struct sps_iovec iov;
        struct rx_pkt_info *info;
        int ret;

        /*
         * Attempt to enable interrupts - if this fails,
         * continue polling and we will retry later.
         */
        ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
        if (ret) {
                pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
                goto fail;
        }

        rx_register_event.options = SPS_O_EOT;
        ret = sps_register_event(bam_rx_pipe, &rx_register_event);
        if (ret) {
                pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
                goto fail;
        }

        cur_rx_conn.options = SPS_O_AUTO_ENABLE |
                SPS_O_EOT | SPS_O_ACK_TRANSFERS;
        ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
        if (ret) {
                pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
                goto fail;
        }
        polling_mode = 0;
        release_wakelock();

        /* handle any rx packets before interrupt was enabled */
        while (bam_connection_is_active && !polling_mode) {
                ret = sps_get_iovec(bam_rx_pipe, &iov);
                if (ret) {
                        pr_err("%s: sps_get_iovec failed %d\n",
                                        __func__, ret);
                        break;
                }
                if (iov.addr == 0)
                        break;

                mutex_lock(&bam_rx_pool_mutexlock);
                if (unlikely(list_empty(&bam_rx_pool))) {
                        DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
                                __func__, (void *)iov.addr);
                        mutex_unlock(&bam_rx_pool_mutexlock);
                        continue;
                }
                info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
                                                        list_node);
                if (info->dma_address != iov.addr) {
                        DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
                                __func__,
                                (void *)iov.addr,
                                (void *)info->dma_address);
                        list_for_each_entry(info, &bam_rx_pool, list_node) {
                                DMUX_LOG_KERR("%s: dma %p\n", __func__,
                                        (void *)info->dma_address);
                                if (iov.addr == info->dma_address)
                                        break;
                        }
                }
                BUG_ON(info->dma_address != iov.addr);
                list_del(&info->list_node);
                --bam_rx_pool_len;
                mutex_unlock(&bam_rx_pool_mutexlock);
                handle_bam_mux_cmd(&info->work);
        }
        return;

fail:
        pr_err("%s: reverting to polling\n", __func__);
        queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

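/*
 * rx_timer_work_func() - RX polling loop
 *
 * While the connection is active, repeatedly drains completed RX descriptors
 * and sleeps POLLING_MIN_SLEEP..POLLING_MAX_SLEEP between passes.  After
 * POLLING_INACTIVITY idle cycles it switches back to interrupt mode.
 */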
static void rx_timer_work_func(struct work_struct *work)
{
        struct sps_iovec iov;
        struct rx_pkt_info *info;
        int inactive_cycles = 0;
        int ret;

        while (bam_connection_is_active) { /* timer loop */
                ++inactive_cycles;
                while (bam_connection_is_active) { /* deplete queue loop */
                        if (in_global_reset)
                                return;

                        ret = sps_get_iovec(bam_rx_pipe, &iov);
                        if (ret) {
                                pr_err("%s: sps_get_iovec failed %d\n",
                                                __func__, ret);
                                break;
                        }
                        if (iov.addr == 0)
                                break;
                        inactive_cycles = 0;
                        mutex_lock(&bam_rx_pool_mutexlock);
                        if (unlikely(list_empty(&bam_rx_pool))) {
                                DMUX_LOG_KERR(
                                        "%s: have iovec %p but rx pool empty\n",
                                        __func__, (void *)iov.addr);
                                mutex_unlock(&bam_rx_pool_mutexlock);
                                continue;
                        }
                        info = list_first_entry(&bam_rx_pool,
                                        struct rx_pkt_info, list_node);
                        if (info->dma_address != iov.addr) {
                                DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
                                        __func__,
                                        (void *)iov.addr,
                                        (void *)info->dma_address);
                                list_for_each_entry(info, &bam_rx_pool,
                                                list_node) {
                                        DMUX_LOG_KERR("%s: dma %p\n", __func__,
                                                (void *)info->dma_address);
                                        if (iov.addr == info->dma_address)
                                                break;
                                }
                        }
                        BUG_ON(info->dma_address != iov.addr);
                        list_del(&info->list_node);
                        --bam_rx_pool_len;
                        mutex_unlock(&bam_rx_pool_mutexlock);
                        handle_bam_mux_cmd(&info->work);
                }

                if (inactive_cycles == POLLING_INACTIVITY) {
                        rx_switch_to_interrupt_mode();
                        break;
                }

                usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
        }
}

static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
        struct tx_pkt_info *pkt;

        DBG("%s: event %d notified\n", __func__, notify->event_id);

        if (in_global_reset)
                return;

        switch (notify->event_id) {
        case SPS_EVENT_EOT:
                pkt = notify->data.transfer.user;
                if (!pkt->is_cmd)
                        dma_unmap_single(NULL, pkt->dma_address,
                                                pkt->skb->len,
                                                DMA_TO_DEVICE);
                else
                        dma_unmap_single(NULL, pkt->dma_address,
                                                pkt->len,
                                                DMA_TO_DEVICE);
                queue_work(bam_mux_tx_workqueue, &pkt->work);
                break;
        default:
                pr_err("%s: received unexpected event id %d\n", __func__,
                        notify->event_id);
        }
}

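/*
 * bam_mux_rx_notify() - SPS event callback for the RX pipe
 *
 * On EOT, disables pipe interrupts and enters polling mode, grabbing a
 * wakelock and pinning the polling work to core 0 so netif_rx() in rmnet
 * uses a single queue.
 */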
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
        int ret;
        struct sps_connect cur_rx_conn;

        DBG("%s: event %d notified\n", __func__, notify->event_id);

        if (in_global_reset)
                return;

        switch (notify->event_id) {
        case SPS_EVENT_EOT:
                /* attempt to disable interrupts in this pipe */
                if (!polling_mode) {
                        ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
                        if (ret) {
                                pr_err("%s: sps_get_config() failed %d, interrupts"
                                        " not disabled\n", __func__, ret);
                                break;
                        }
                        cur_rx_conn.options = SPS_O_AUTO_ENABLE |
                                SPS_O_ACK_TRANSFERS | SPS_O_POLL;
                        ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
                        if (ret) {
                                pr_err("%s: sps_set_config() failed %d, interrupts"
                                        " not disabled\n", __func__, ret);
                                break;
                        }
                        grab_wakelock();
                        polling_mode = 1;
                        /*
                         * run on core 0 so that netif_rx() in rmnet uses only
                         * one queue
                         */
                        queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
                }
                break;
        default:
                pr_err("%s: received unexpected event id %d\n", __func__,
                        notify->event_id);
        }
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
        int i = 0;
        int j;

        for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
                i += scnprintf(buf + i, max - i,
                        "ch%02d local open=%s remote open=%s\n",
                        j, bam_ch_is_local_open(j) ? "Y" : "N",
                        bam_ch_is_remote_open(j) ? "Y" : "N");
        }

        return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
        struct list_head *p;
        unsigned long flags;
        int n = 0;

        spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
        __list_for_each(p, &bam_tx_pool) {
                ++n;
        }
        spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

        return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
        int i = 0;

        i += scnprintf(buf + i, max - i,
                        "skb read cnt: %u\n"
                        "skb write cnt: %u\n"
                        "skb copy cnt: %u\n"
                        "skb copy bytes: %u\n"
                        "sps tx failures: %u\n"
                        "sps tx stalls: %u\n"
                        "rx queue len: %d\n"
                        "a2 ack out cnt: %d\n"
                        "a2 ack in cnt: %d\n"
                        "a2 pwr cntl in: %d\n",
                        bam_dmux_read_cnt,
                        bam_dmux_write_cnt,
                        bam_dmux_write_cpy_cnt,
                        bam_dmux_write_cpy_bytes,
                        bam_dmux_tx_sps_failure_cnt,
                        bam_dmux_tx_stall_cnt,
                        bam_rx_pool_len,
                        atomic_read(&bam_dmux_ack_out_cnt),
                        atomic_read(&bam_dmux_ack_in_cnt),
                        atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
                        );

        return i;
}

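/*
 * debug_log() - debugfs read helper for the state log
 *
 * Emits a legend describing the flag characters on the first read, then
 * drains fixed-size records from bam_dmux_state_log into the caller's
 * buffer, appending a newline to each record.
 */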
static int debug_log(char *buff, int max, loff_t *ppos)
{
        unsigned long flags;
        int i = 0;

        if (bam_dmux_state_logging_disabled) {
                i += scnprintf(buff - i, max - i, "Logging disabled\n");
                return i;
        }

        if (*ppos == 0) {
                i += scnprintf(buff - i, max - i,
                        "<DMUX> timestamp FLAGS [Message]\n"
                        "FLAGS:\n"
                        "\tD: 1 = Power collapse disabled\n"
                        "\tR: 1 = in global reset\n"
                        "\tP: 1 = BAM is powered up\n"
                        "\tA: 1 = BAM initialized and ready for data\n"
                        "\n"
                        "\tV: 1 = Uplink vote for power\n"
                        "\tU: 1 = Uplink active\n"
                        "\tW: 1 = Uplink Wait-for-ack\n"
                        "\tA: 1 = Uplink ACK received\n"
                        "\t#: >=1 On-demand uplink vote\n"
                        "\tD: 1 = Disconnect ACK active\n"
                        );
                buff += i;
        }

        spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
        while (kfifo_len(&bam_dmux_state_log)
                        && (i + LOG_MESSAGE_MAX_SIZE) < max) {
                int k_len;
                k_len = kfifo_out(&bam_dmux_state_log,
                                buff, LOG_MESSAGE_MAX_SIZE);
                if (k_len != LOG_MESSAGE_MAX_SIZE) {
                        pr_err("%s: retrieve failure %d\n", __func__, k_len);
                        break;
                }

                /* keep non-null portion of string and add line break */
                k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
                buff += k_len;
                i += k_len;
                if (k_len && *(buff - 1) != '\n') {
                        *buff++ = '\n';
                        ++i;
                }
        }
        spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);

        return i;
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001361#define DEBUG_BUFMAX 4096
1362static char debug_buffer[DEBUG_BUFMAX];
1363
1364static ssize_t debug_read(struct file *file, char __user *buf,
1365 size_t count, loff_t *ppos)
1366{
1367 int (*fill)(char *buf, int max) = file->private_data;
1368 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1369 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1370}
1371
Eric Holmberg878923a2012-01-10 14:28:19 -07001372static ssize_t debug_read_multiple(struct file *file, char __user *buff,
1373 size_t count, loff_t *ppos)
1374{
1375 int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
1376 char *buffer;
1377 int bsize;
1378
1379 buffer = kmalloc(count, GFP_KERNEL);
1380 if (!buffer)
1381 return -ENOMEM;
1382
1383 bsize = util_func(buffer, count, ppos);
1384
1385 if (bsize >= 0) {
1386 if (copy_to_user(buff, buffer, bsize)) {
1387 kfree(buffer);
1388 return -EFAULT;
1389 }
1390 *ppos += bsize;
1391 }
1392 kfree(buffer);
1393 return bsize;
1394}
1395
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001396static int debug_open(struct inode *inode, struct file *file)
1397{
1398 file->private_data = inode->i_private;
1399 return 0;
1400}
1401
1403static const struct file_operations debug_ops = {
1404 .read = debug_read,
1405 .open = debug_open,
1406};
1407
Eric Holmberg878923a2012-01-10 14:28:19 -07001408static const struct file_operations debug_ops_multiple = {
1409 .read = debug_read_multiple,
1410 .open = debug_open,
1411};
1412
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001413static void debug_create(const char *name, mode_t mode,
1414 struct dentry *dent,
1415 int (*fill)(char *buf, int max))
1416{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001417 struct dentry *file;
1418
1419 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1420 if (IS_ERR(file))
1421 pr_err("%s: debugfs create failed %d\n", __func__,
1422 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001423}
1424
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001425static void debug_create_multiple(const char *name, mode_t mode,
1426 struct dentry *dent,
1427 int (*fill)(char *buf, int max, loff_t *ppos))
1428{
1429 struct dentry *file;
1430
1431 file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
1432 if (IS_ERR(file))
1433 pr_err("%s: debugfs create failed %d\n", __func__,
1434 (int)PTR_ERR(file));
1435}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001436#endif
1437
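/*
 * Broadcast a DMUX event to every locally open channel and to every
 * externally registered notify function.
 */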
Jeff Hugod98b1082011-10-24 10:30:23 -06001438static void notify_all(int event, unsigned long data)
1439{
1440 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001441 struct list_head *temp;
1442 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001443
1444 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001445 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001446 bam_ch[i].notify(bam_ch[i].priv, event, data);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001447 bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
1448 __func__, i, event, data);
1449 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001450 }
Jeff Hugocb798022012-04-09 14:55:40 -06001451
1452 __list_for_each(temp, &bam_other_notify_funcs) {
1453 func = container_of(temp, struct outside_notify_func,
1454 list_node);
1455 func->notify(func->priv, event, data);
1456 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001457}
1458
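/*
 * Workqueue handler that brings up the uplink.  ul_wakeup() sleeps, so
 * the ul_wakeup_lock read lock is dropped around the call and re-taken
 * before clients are told the uplink is connected.
 */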
1459static void kickoff_ul_wakeup_func(struct work_struct *work)
1460{
1461 read_lock(&ul_wakeup_lock);
1462 if (!bam_is_connected) {
1463 read_unlock(&ul_wakeup_lock);
1464 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001465 if (unlikely(in_global_reset == 1))
1466 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001467 read_lock(&ul_wakeup_lock);
1468 ul_packet_written = 1;
1469 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1470 }
1471 read_unlock(&ul_wakeup_lock);
1472}
1473
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001474int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001475{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001476 int is_connected;
1477
1478 read_lock(&ul_wakeup_lock);
1479 ul_packet_written = 1;
1480 is_connected = bam_is_connected;
1481 if (!is_connected)
1482 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1483 read_unlock(&ul_wakeup_lock);
1484
1485 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001486}
1487
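/*
 * Apply the apps-side uplink power vote by setting or clearing
 * SMSM_A2_POWER_CONTROL in shared state; the A2 answers through
 * bam_dmux_smsm_ack_cb().  A duplicate vote is logged as a warning.
 */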
Eric Holmberg878923a2012-01-10 14:28:19 -07001488static void power_vote(int vote)
1489{
1490 bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
1491 bam_dmux_uplink_vote, vote);
1492
1493 if (bam_dmux_uplink_vote == vote)
1494 bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
1495
1496 bam_dmux_uplink_vote = vote;
1497 if (vote)
1498 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1499 else
1500 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1501}
1502
Eric Holmberg454d9da2012-01-12 09:37:14 -07001503/*
1504 * @note: Must be called with ul_wakeup_lock write-locked.
1505 */
1506static inline void ul_powerdown(void)
1507{
1508 bam_dmux_log("%s: powerdown\n", __func__);
1509 verify_tx_queue_is_empty(__func__);
1510
1511 if (a2_pc_disabled) {
1512 wait_for_dfab = 1;
1513 INIT_COMPLETION(dfab_unvote_completion);
1514 release_wakelock();
1515 } else {
1516 wait_for_ack = 1;
1517 INIT_COMPLETION(ul_wakeup_ack_completion);
1518 power_vote(0);
1519 }
1520 bam_is_connected = 0;
1521 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1522}
1523
1524static inline void ul_powerdown_finish(void)
1525{
1526 if (a2_pc_disabled && wait_for_dfab) {
1527 unvote_dfab();
1528 complete_all(&dfab_unvote_completion);
1529 wait_for_dfab = 0;
1530 }
1531}
1532
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001533/*
1534 * Votes for UL power and returns current power state.
1535 *
1536 * @returns true if currently connected
1537 */
1538int msm_bam_dmux_ul_power_vote(void)
1539{
1540 int is_connected;
1541
1542 read_lock(&ul_wakeup_lock);
1543 atomic_inc(&ul_ondemand_vote);
1544 is_connected = bam_is_connected;
1545 if (!is_connected)
1546 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1547 read_unlock(&ul_wakeup_lock);
1548
1549 return is_connected;
1550}
1551
1552/*
1553 * Unvotes for UL power.
1554 *
1555 * @returns true if vote count is 0 (UL shutdown possible)
1556 */
1557int msm_bam_dmux_ul_power_unvote(void)
1558{
1559 int vote;
1560
1561 read_lock(&ul_wakeup_lock);
1562 vote = atomic_dec_return(&ul_ondemand_vote);
1563	if (unlikely(vote < 0))
1564 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1565 read_unlock(&ul_wakeup_lock);
1566
1567 return vote == 0;
1568}
1569
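/*
 * Illustrative vote/unvote pairing (wait_for_ul_connected() and
 * send_burst() are hypothetical helpers, not part of this driver): a
 * client brackets a transmit burst so the uplink stays powered only
 * while it is needed:
 *
 *	if (!msm_bam_dmux_ul_power_vote())
 *		wait_for_ul_connected();  // block on BAM_DMUX_UL_CONNECTED
 *	send_burst();
 *	msm_bam_dmux_ul_power_unvote();
 */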
Jeff Hugocb798022012-04-09 14:55:40 -06001570int msm_bam_dmux_reg_notify(void *priv,
1571 void (*notify)(void *priv, int event_type,
1572 unsigned long data))
1573{
1574 struct outside_notify_func *func;
1575
1576 if (!notify)
1577 return -EINVAL;
1578
1579 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1580 if (!func)
1581 return -ENOMEM;
1582
1583 func->notify = notify;
1584 func->priv = priv;
1585 list_add(&func->list_node, &bam_other_notify_funcs);
1586
1587 return 0;
1588}
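/*
 * Example registration (illustrative only; my_cb, my_cookie and
 * resume_tx are hypothetical names):
 *
 *	static void my_cb(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			resume_tx(priv);
 *	}
 *
 *	rc = msm_bam_dmux_reg_notify(my_cookie, my_cb);
 */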
1589
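/*
 * Uplink inactivity timer.  Reschedules itself while packets are being
 * written or on-demand votes are held, and powers the link down after a
 * full UL_TIMEOUT_DELAY with no activity.  A packet stalled in
 * bam_tx_pool is logged and counted as activity so the link is not
 * dropped out from under it.
 */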
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001590static void ul_timeout(struct work_struct *work)
1591{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001592 unsigned long flags;
1593 int ret;
1594
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001595 if (in_global_reset)
1596 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001597 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1598 if (!ret) { /* failed to grab lock, reschedule and bail */
1599 schedule_delayed_work(&ul_timeout_work,
1600 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1601 return;
1602 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001603 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001604 if (!ul_packet_written) {
1605 spin_lock(&bam_tx_pool_spinlock);
1606 if (!list_empty(&bam_tx_pool)) {
1607 struct tx_pkt_info *info;
1608
1609 info = list_first_entry(&bam_tx_pool,
1610 struct tx_pkt_info, list_node);
1611 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1612 __func__, info->ts_sec, info->ts_nsec);
1613 DBG_INC_TX_STALL_CNT();
1614 ul_packet_written = 1;
1615 }
1616 spin_unlock(&bam_tx_pool_spinlock);
1617 }
1618
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001619 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
1620 bam_dmux_log("%s: pkt written %d\n",
1621 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001622 ul_packet_written = 0;
1623 schedule_delayed_work(&ul_timeout_work,
1624 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001625 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001626 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001627 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001628 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001629 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001630 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001631}
Jeff Hugo4838f412012-01-20 11:19:37 -07001632
1633static int ssrestart_check(void)
1634{
Eric Holmberg90285e22012-02-22 12:33:05 -07001635 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1636 in_global_reset = 1;
1637 if (get_restart_level() <= RESET_SOC)
1638 DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
1639 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001640}
1641
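/*
 * Bring up the uplink.  With power collapse enabled this is a three-step
 * handshake with the A2: wait out any unacked power-down, vote for power
 * and wait for the wakeup ack, then wait for the BAM connection to
 * complete.  Each wait is bounded by HZ; a timeout is treated as a modem
 * failure via ssrestart_check().
 */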
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001642static void ul_wakeup(void)
1643{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001644 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001645 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001646
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001647 mutex_lock(&wakeup_lock);
1648 if (bam_is_connected) { /* bam got connected before lock grabbed */
Eric Holmberg878923a2012-01-10 14:28:19 -07001649 bam_dmux_log("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001650 mutex_unlock(&wakeup_lock);
1651 return;
1652 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001653
Jeff Hugoc2696142012-05-03 11:42:13 -06001654 /*
1655	 * If someone votes for UL before the BAM is initialized (modem up for
1656	 * the first time), set a flag so init kicks off UL wakeup once BAM is up.
1657 */
1658 mutex_lock(&delayed_ul_vote_lock);
1659 if (unlikely(!bam_mux_initialized)) {
1660 need_delayed_ul_vote = 1;
1661 mutex_unlock(&delayed_ul_vote_lock);
1662 mutex_unlock(&wakeup_lock);
1663 return;
1664 }
1665 mutex_unlock(&delayed_ul_vote_lock);
1666
Eric Holmberg006057d2012-01-11 10:10:42 -07001667 if (a2_pc_disabled) {
1668 /*
1669 * don't grab the wakelock the first time because it is
1670 * already grabbed when a2 powers on
1671 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001672 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001673 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001674 do_vote_dfab = 1; /* vote must occur after wait */
1675 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001676 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001677 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001678 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001679 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001680 &dfab_unvote_completion, HZ);
1681 BUG_ON(ret == 0);
1682 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001683 if (likely(do_vote_dfab))
1684 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001685 schedule_delayed_work(&ul_timeout_work,
1686 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1687 bam_is_connected = 1;
1688 mutex_unlock(&wakeup_lock);
1689 return;
1690 }
1691
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001692 /*
1693	 * Must wait for the previous power-down request to have been acked.
1694	 * Chances are the ack already came in, and this will just fall through
1695	 * instead of waiting.
1696 */
1697 if (wait_for_ack) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001698 bam_dmux_log("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001699 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001700 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001701 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001702 if (unlikely(ret == 0) && ssrestart_check()) {
1703 mutex_unlock(&wakeup_lock);
1704 bam_dmux_log("%s timeout previous ack\n", __func__);
1705 return;
1706 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001707 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001708 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001709 power_vote(1);
1710 bam_dmux_log("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001711 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001712 if (unlikely(ret == 0) && ssrestart_check()) {
1713 mutex_unlock(&wakeup_lock);
1714 bam_dmux_log("%s timeout wakeup ack\n", __func__);
1715 return;
1716 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001717 bam_dmux_log("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001718 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001719 if (unlikely(ret == 0) && ssrestart_check()) {
1720 mutex_unlock(&wakeup_lock);
1721 bam_dmux_log("%s timeout power on\n", __func__);
1722 return;
1723 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001724
1725 bam_is_connected = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -07001726 bam_dmux_log("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001727 schedule_delayed_work(&ul_timeout_work,
1728 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1729 mutex_unlock(&wakeup_lock);
1730}
1731
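/*
 * Restore the BAM connection after the A2 powers back up: reset the
 * device, reconnect both pipes, re-register pipe events, and ack the
 * power state change.  In power-management-only mode there are no data
 * pipes, so only the ack and bookkeeping are performed.
 */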
1732static void reconnect_to_bam(void)
1733{
1734 int i;
1735
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001736 in_global_reset = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001737 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001738 if (!power_management_only_mode) {
1739 i = sps_device_reset(a2_device_handle);
1740 if (i)
1741 pr_err("%s: device reset failed rc = %d\n", __func__,
1742 i);
1743 i = sps_connect(bam_tx_pipe, &tx_connection);
1744 if (i)
1745 pr_err("%s: tx connection failed rc = %d\n", __func__,
1746 i);
1747 i = sps_connect(bam_rx_pipe, &rx_connection);
1748 if (i)
1749 pr_err("%s: rx connection failed rc = %d\n", __func__,
1750 i);
1751 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1752 if (i)
1753 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1754 i);
1755 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1756 if (i)
1757 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1758 i);
1759 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001760
1761 bam_connection_is_active = 1;
1762
1763 if (polling_mode)
1764 rx_switch_to_interrupt_mode();
1765
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001766 toggle_apps_ack();
1767 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001768 if (!power_management_only_mode)
1769 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001770}
1771
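/*
 * Tear down the BAM connection for an A2 power-down: force the uplink
 * down if it is still active, disconnect the pipes, zero the descriptor
 * FIFOs, and flush the RX packet pool so no stale buffers survive the
 * power cycle.
 */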
1772static void disconnect_to_bam(void)
1773{
1774 struct list_head *node;
1775 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001776 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001777
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001778 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001779
1780 /* handle disconnect during active UL */
1781 write_lock_irqsave(&ul_wakeup_lock, flags);
1782 if (bam_is_connected) {
1783 bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
1784 ul_powerdown();
1785 }
1786 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1787 ul_powerdown_finish();
1788
1789 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001790 INIT_COMPLETION(bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001791 if (!power_management_only_mode) {
1792 sps_disconnect(bam_tx_pipe);
1793 sps_disconnect(bam_rx_pipe);
1794 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1795 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1796 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001797 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001798
1799 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001800 while (!list_empty(&bam_rx_pool)) {
1801 node = bam_rx_pool.next;
1802 list_del(node);
1803 info = container_of(node, struct rx_pkt_info, list_node);
1804 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1805 DMA_FROM_DEVICE);
1806 dev_kfree_skb_any(info->skb);
1807 kfree(info);
1808 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001809 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001810 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001811
Jeff Hugo0b13a352012-03-17 23:18:30 -06001812 if (disconnect_ack)
1813 toggle_apps_ack();
1814
Eric Holmberg878923a2012-01-10 14:28:19 -07001815 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001816}
1817
1818static void vote_dfab(void)
1819{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001820 int rc;
1821
Eric Holmberg006057d2012-01-11 10:10:42 -07001822 bam_dmux_log("%s\n", __func__);
1823 mutex_lock(&dfab_status_lock);
1824 if (dfab_is_on) {
1825 bam_dmux_log("%s: dfab is already on\n", __func__);
1826 mutex_unlock(&dfab_status_lock);
1827 return;
1828 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001829 rc = clk_prepare_enable(dfab_clk);
Jeff Hugoca0caa82011-12-05 16:05:23 -07001830 if (rc)
Eric Holmberg006057d2012-01-11 10:10:42 -07001831 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001832 rc = clk_prepare_enable(xo_clk);
1833 if (rc)
1834 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc);
Eric Holmberg006057d2012-01-11 10:10:42 -07001835 dfab_is_on = 1;
1836 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001837}
1838
1839static void unvote_dfab(void)
1840{
Eric Holmberg006057d2012-01-11 10:10:42 -07001841 bam_dmux_log("%s\n", __func__);
1842 mutex_lock(&dfab_status_lock);
1843 if (!dfab_is_on) {
1844 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1845 dump_stack();
1846 mutex_unlock(&dfab_status_lock);
1847 return;
1848 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001849 clk_disable_unprepare(dfab_clk);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001850 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001851 dfab_is_on = 0;
1852 mutex_unlock(&dfab_status_lock);
1853}
1854
1855/* reference counting wrapper around wakelock */
1856static void grab_wakelock(void)
1857{
1858 unsigned long flags;
1859
1860 spin_lock_irqsave(&wakelock_reference_lock, flags);
1861 bam_dmux_log("%s: ref count = %d\n", __func__,
1862 wakelock_reference_count);
1863 if (wakelock_reference_count == 0)
1864 wake_lock(&bam_wakelock);
1865 ++wakelock_reference_count;
1866 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1867}
1868
1869static void release_wakelock(void)
1870{
1871 unsigned long flags;
1872
1873 spin_lock_irqsave(&wakelock_reference_lock, flags);
1874 if (wakelock_reference_count == 0) {
1875 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1876 dump_stack();
1877 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1878 return;
1879 }
1880 bam_dmux_log("%s: ref count = %d\n", __func__,
1881 wakelock_reference_count);
1882 --wakelock_reference_count;
1883 if (wakelock_reference_count == 0)
1884 wake_unlock(&bam_wakelock);
1885 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001886}
1887
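/*
 * Subsystem-restart hook, invoked after the modem shuts down.  Marks the
 * driver as in global reset, forces the uplink down, re-creates platform
 * devices for channels the remote side had open, and frees every packet
 * still queued in bam_tx_pool.
 */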
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001888static int restart_notifier_cb(struct notifier_block *this,
1889 unsigned long code,
1890 void *data)
1891{
1892 int i;
1893 struct list_head *node;
1894 struct tx_pkt_info *info;
1895 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001896 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001897
1898 if (code != SUBSYS_AFTER_SHUTDOWN)
1899 return NOTIFY_DONE;
1900
Eric Holmberg878923a2012-01-10 14:28:19 -07001901 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001902 in_global_reset = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001903
1904 /* Handle uplink Powerdown */
1905 write_lock_irqsave(&ul_wakeup_lock, flags);
1906 if (bam_is_connected) {
1907 ul_powerdown();
1908 wait_for_ack = 0;
1909 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001910 /*
1911	 * If the modem crashed during ul_wakeup(), power_vote is 1 and must be
1912	 * reset to 0. Harmless if the bam_is_connected check above passed.
1913 */
1914 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001915 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1916 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001917 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001918 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugof62029d2012-07-17 13:39:53 -06001919 disconnect_ack = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001920
1921 /* Cleanup Channel States */
Eric Holmberga623da82012-07-12 09:37:09 -06001922 mutex_lock(&bam_pdev_mutexlock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001923 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1924 temp_remote_status = bam_ch_is_remote_open(i);
1925 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001926 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001927 if (bam_ch_is_local_open(i))
1928 bam_ch[i].status |= BAM_CH_IN_RESET;
1929 if (temp_remote_status) {
1930 platform_device_unregister(bam_ch[i].pdev);
1931 bam_ch[i].pdev = platform_device_alloc(
1932 bam_ch[i].name, 2);
1933 }
1934 }
Eric Holmberga623da82012-07-12 09:37:09 -06001935 mutex_unlock(&bam_pdev_mutexlock);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001936
1937 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07001938 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001939 while (!list_empty(&bam_tx_pool)) {
1940 node = bam_tx_pool.next;
1941 list_del(node);
1942 info = container_of(node, struct tx_pkt_info,
1943 list_node);
1944 if (!info->is_cmd) {
1945 dma_unmap_single(NULL, info->dma_address,
1946 info->skb->len,
1947 DMA_TO_DEVICE);
1948 dev_kfree_skb_any(info->skb);
1949 } else {
1950 dma_unmap_single(NULL, info->dma_address,
1951 info->len,
1952 DMA_TO_DEVICE);
1953 kfree(info->skb);
1954 }
1955 kfree(info);
1956 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001957 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001958
Eric Holmberg878923a2012-01-10 14:28:19 -07001959 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001960 return NOTIFY_DONE;
1961}
1962
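/*
 * Full BAM bring-up: map the A2 BAM, register it with the SPS driver,
 * then build the TX (dest pipe 4) and RX (src pipe 5) connections, each
 * with a 2k coherent descriptor FIFO and an EOT-triggered callback.
 */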
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001963static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001964{
1965 u32 h;
1966 dma_addr_t dma_addr;
1967 int ret;
1968 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001969 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001970
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001971 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001972 /* init BAM */
1973 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1974 if (!a2_virt_addr) {
1975 pr_err("%s: ioremap failed\n", __func__);
1976 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001977 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001978 }
1979 a2_props.phys_addr = A2_PHYS_BASE;
1980 a2_props.virt_addr = a2_virt_addr;
1981 a2_props.virt_size = A2_PHYS_SIZE;
1982 a2_props.irq = A2_BAM_IRQ;
Jeff Hugo927cba62011-11-11 11:49:52 -07001983 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984 a2_props.num_pipes = A2_NUM_PIPES;
1985 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07001986 if (cpu_is_msm9615())
1987 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 /* need to free on tear down */
1989 ret = sps_register_bam_device(&a2_props, &h);
1990 if (ret < 0) {
1991 pr_err("%s: register bam error %d\n", __func__, ret);
1992 goto register_bam_failed;
1993 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001994 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001995
1996 bam_tx_pipe = sps_alloc_endpoint();
1997 if (bam_tx_pipe == NULL) {
1998 pr_err("%s: tx alloc endpoint failed\n", __func__);
1999 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002000 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002001 }
2002 ret = sps_get_config(bam_tx_pipe, &tx_connection);
2003 if (ret) {
2004 pr_err("%s: tx get config failed %d\n", __func__, ret);
2005 goto tx_get_config_failed;
2006 }
2007
2008 tx_connection.source = SPS_DEV_HANDLE_MEM;
2009 tx_connection.src_pipe_index = 0;
2010 tx_connection.destination = h;
2011 tx_connection.dest_pipe_index = 4;
2012 tx_connection.mode = SPS_MODE_DEST;
2013 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
2014 tx_desc_mem_buf.size = 0x800; /* 2k */
2015 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
2016 &dma_addr, 0);
2017 if (tx_desc_mem_buf.base == NULL) {
2018 pr_err("%s: tx memory alloc failed\n", __func__);
2019 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002020 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002021 }
2022 tx_desc_mem_buf.phys_base = dma_addr;
2023 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
2024 tx_connection.desc = tx_desc_mem_buf;
2025 tx_connection.event_thresh = 0x10;
2026
2027 ret = sps_connect(bam_tx_pipe, &tx_connection);
2028 if (ret < 0) {
2029 pr_err("%s: tx connect error %d\n", __func__, ret);
2030 goto tx_connect_failed;
2031 }
2032
2033 bam_rx_pipe = sps_alloc_endpoint();
2034 if (bam_rx_pipe == NULL) {
2035 pr_err("%s: rx alloc endpoint failed\n", __func__);
2036 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002037 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002038 }
2039 ret = sps_get_config(bam_rx_pipe, &rx_connection);
2040 if (ret) {
2041 pr_err("%s: rx get config failed %d\n", __func__, ret);
2042 goto rx_get_config_failed;
2043 }
2044
2045 rx_connection.source = h;
2046 rx_connection.src_pipe_index = 5;
2047 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2048 rx_connection.dest_pipe_index = 1;
2049 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002050 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2051 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 rx_desc_mem_buf.size = 0x800; /* 2k */
2053 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2054 &dma_addr, 0);
2055 if (rx_desc_mem_buf.base == NULL) {
2056 pr_err("%s: rx memory alloc failed\n", __func__);
2057 ret = -ENOMEM;
2058 goto rx_mem_failed;
2059 }
2060 rx_desc_mem_buf.phys_base = dma_addr;
2061 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2062 rx_connection.desc = rx_desc_mem_buf;
2063 rx_connection.event_thresh = 0x10;
2064
2065 ret = sps_connect(bam_rx_pipe, &rx_connection);
2066 if (ret < 0) {
2067 pr_err("%s: rx connect error %d\n", __func__, ret);
2068 goto rx_connect_failed;
2069 }
2070
2071 tx_register_event.options = SPS_O_EOT;
2072 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2073 tx_register_event.xfer_done = NULL;
2074 tx_register_event.callback = bam_mux_tx_notify;
2075 tx_register_event.user = NULL;
2076 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
2077 if (ret < 0) {
2078 pr_err("%s: tx register event error %d\n", __func__, ret);
2079 goto rx_event_reg_failed;
2080 }
2081
Jeff Hugo33dbc002011-08-25 15:52:53 -06002082 rx_register_event.options = SPS_O_EOT;
2083 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2084 rx_register_event.xfer_done = NULL;
2085 rx_register_event.callback = bam_mux_rx_notify;
2086 rx_register_event.user = NULL;
2087 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
2088 if (ret < 0) {
2089		pr_err("%s: rx register event error %d\n", __func__, ret);
2090 goto rx_event_reg_failed;
2091 }
2092
Jeff Hugoc2696142012-05-03 11:42:13 -06002093 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002094 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002095 if (need_delayed_ul_vote) {
2096 need_delayed_ul_vote = 0;
2097 msm_bam_dmux_kickoff_ul_wakeup();
2098 }
2099 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002100 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002101 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002102 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002103 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002104 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105
2106rx_event_reg_failed:
2107 sps_disconnect(bam_rx_pipe);
2108rx_connect_failed:
2109 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2110 rx_desc_mem_buf.phys_base);
2111rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002112rx_get_config_failed:
2113 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002114rx_alloc_endpoint_failed:
2115 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116tx_connect_failed:
2117 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2118 tx_desc_mem_buf.phys_base);
2119tx_get_config_failed:
2120 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002121tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002123 /*
2124 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2125 * same handle below will cause a crash, so skip it if we've freed
2126 * the handle here.
2127 */
2128 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002130 if (!skip_iounmap)
2131 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002132ioremap_failed:
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002134 return ret;
2135}
2136
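/*
 * Minimal bring-up used when full pipe initialization fails: register
 * the BAM device so the power-control handshake still works, and leave
 * the driver in power_management_only_mode with no data pipes.
 */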
2137static int bam_init_fallback(void)
2138{
2139 u32 h;
2140 int ret;
2141 void *a2_virt_addr;
2142
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002143 /* init BAM */
2144 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
2145 if (!a2_virt_addr) {
2146 pr_err("%s: ioremap failed\n", __func__);
2147 ret = -ENOMEM;
2148 goto ioremap_failed;
2149 }
2150 a2_props.phys_addr = A2_PHYS_BASE;
2151 a2_props.virt_addr = a2_virt_addr;
2152 a2_props.virt_size = A2_PHYS_SIZE;
2153 a2_props.irq = A2_BAM_IRQ;
2154 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2155 a2_props.num_pipes = A2_NUM_PIPES;
2156 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2157 if (cpu_is_msm9615())
2158 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2159 ret = sps_register_bam_device(&a2_props, &h);
2160 if (ret < 0) {
2161 pr_err("%s: register bam error %d\n", __func__, ret);
2162 goto register_bam_failed;
2163 }
2164 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002165
2166 mutex_lock(&delayed_ul_vote_lock);
2167 bam_mux_initialized = 1;
2168 if (need_delayed_ul_vote) {
2169 need_delayed_ul_vote = 0;
2170 msm_bam_dmux_kickoff_ul_wakeup();
2171 }
2172 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002173 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002174
Jeff Hugo18792a32012-06-20 15:25:55 -06002175 power_management_only_mode = 1;
2176 bam_connection_is_active = 1;
2177 complete_all(&bam_connection_completion);
2178
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002179 return 0;
2180
2181register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002182 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002183ioremap_failed:
2184 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002185}
Jeff Hugoade1f842011-08-03 15:53:59 -06002186
Jeff Hugoa670b762012-03-15 15:58:28 -06002187static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002188{
2189 int ret = 0;
2190
2191 ret = bam_init();
2192 if (ret) {
2193 ret = bam_init_fallback();
2194 if (ret)
2195			pr_err("%s: bam init fallback failed: %d\n",
2196 __func__, ret);
2197 }
2198}
2199
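/*
 * Ack an A2 power state change by toggling SMSM_A2_POWER_CONTROL_ACK.
 * The handshake is edge-based: each transition of the bit acknowledges
 * exactly one request, so the bit alternates between set and clear.
 */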
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002200static void toggle_apps_ack(void)
2201{
2202 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002203
2204 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
2205 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002206 smsm_change_state(SMSM_APPS_STATE,
2207 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2208 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2209 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002210 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002211}
2212
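/*
 * SMSM callback driving the power state machine.  A2 asserting
 * SMSM_A2_POWER_CONTROL means power up (the first assertion triggers BAM
 * init, later ones reconnect); deassertion means power down.  Duplicate
 * notifications for the same state are filtered out under smsm_cb_lock.
 */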
Jeff Hugoade1f842011-08-03 15:53:59 -06002213static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2214{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002215 static int last_processed_state;
2216
2217 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002218 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002219 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002220 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2221 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002222 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
2223 bam_dmux_log("%s: already processed this state\n", __func__);
2224 mutex_unlock(&smsm_cb_lock);
2225 return;
2226 }
2227
2228 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002229
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002230 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002231 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002232 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002233 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002234 } else if (bam_mux_initialized &&
2235 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002236 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002237 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002238 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002239 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002240 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002241 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002242 if (cpu_is_msm9615())
2243 msm9615_bam_init();
2244 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002245 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002246 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07002247 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002248 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002249 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002250 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002251
2252}
2253
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002254static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2255 uint32_t new_state)
2256{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002257 DBG_INC_ACK_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002258 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2259 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002260 complete_all(&ul_wakeup_ack_completion);
2261}
2262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002263static int bam_dmux_probe(struct platform_device *pdev)
2264{
2265 int rc;
2266
2267 DBG("%s probe called\n", __func__);
2268 if (bam_mux_initialized)
2269 return 0;
2270
Stephen Boyd69d35e32012-02-14 15:33:30 -08002271 xo_clk = clk_get(&pdev->dev, "xo");
2272 if (IS_ERR(xo_clk)) {
2273 pr_err("%s: did not get xo clock\n", __func__);
2274 return PTR_ERR(xo_clk);
2275 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002276 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002277 if (IS_ERR(dfab_clk)) {
2278 pr_err("%s: did not get dfab clock\n", __func__);
2279		clk_put(xo_clk);
		return PTR_ERR(dfab_clk);
2280 }
2281
2282 rc = clk_set_rate(dfab_clk, 64000000);
2283 if (rc)
2284 pr_err("%s: unable to set dfab clock rate\n", __func__);
2285
Jeff Hugofff43af92012-03-29 17:54:52 -06002286 /*
2287	 * Set up the RX workqueue with a single context so that it can be
2288	 * pinned to core 0 without blocking the watchdog pet function, and so
2289	 * netif_rx() in rmnet only uses one queue.
2290 */
2291 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2292 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002293 if (!bam_mux_rx_workqueue)
2294 return -ENOMEM;
2295
2296 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2297 if (!bam_mux_tx_workqueue) {
2298 destroy_workqueue(bam_mux_rx_workqueue);
2299 return -ENOMEM;
2300 }
2301
Jeff Hugo7960abd2011-08-02 15:39:38 -06002302 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002303 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002304 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2305 "bam_dmux_ch_%d", rc);
2306 /* bus 2, ie a2 stream 2 */
2307 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2308 if (!bam_ch[rc].pdev) {
2309 pr_err("%s: platform device alloc failed\n", __func__);
2310 destroy_workqueue(bam_mux_rx_workqueue);
2311 destroy_workqueue(bam_mux_tx_workqueue);
2312 return -ENOMEM;
2313 }
2314 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002315
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002316 init_completion(&ul_wakeup_ack_completion);
2317 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002318 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002319 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002320 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002321
Jeff Hugoade1f842011-08-03 15:53:59 -06002322 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2323 bam_dmux_smsm_cb, NULL);
2324
2325 if (rc) {
2326 destroy_workqueue(bam_mux_rx_workqueue);
2327 destroy_workqueue(bam_mux_tx_workqueue);
2328 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2329 return -ENOMEM;
2330 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002331
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002332 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2333 bam_dmux_smsm_ack_cb, NULL);
2334
2335 if (rc) {
2336 destroy_workqueue(bam_mux_rx_workqueue);
2337 destroy_workqueue(bam_mux_tx_workqueue);
2338 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2339 SMSM_A2_POWER_CONTROL,
2340 bam_dmux_smsm_cb, NULL);
2341 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2342 rc);
2343 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2344 platform_device_put(bam_ch[rc].pdev);
2345 return -ENOMEM;
2346 }
2347
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002348 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2349 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2350
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002351 return 0;
2352}
2353
2354static struct platform_driver bam_dmux_driver = {
2355 .probe = bam_dmux_probe,
2356 .driver = {
2357 .name = "BAM_RMNT",
2358 .owner = THIS_MODULE,
2359 },
2360};
2361
2362static int __init bam_dmux_init(void)
2363{
Eric Holmberg878923a2012-01-10 14:28:19 -07002364 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002365#ifdef CONFIG_DEBUG_FS
2366 struct dentry *dent;
2367
2368 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002369 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002370 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002371 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2372 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberge4ac80b2012-01-12 09:21:59 -07002373 debug_create_multiple("log", 0444, dent, debug_log);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002374 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002375#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07002376 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
2377 if (ret) {
2378 pr_err("%s: failed to allocate log %d\n", __func__, ret);
2379 bam_dmux_state_logging_disabled = 1;
2380 }
2381
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002382 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002383 return platform_driver_register(&bam_dmux_driver);
2384}
2385
Jeff Hugoade1f842011-08-03 15:53:59 -06002386late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002387MODULE_DESCRIPTION("MSM BAM DMUX");
2388MODULE_LICENSE("GPL v2");