/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN	0x1
#define BAM_CH_REMOTE_OPEN	0x2
#define BAM_CH_IN_RESET		0x4

#define BAM_MUX_HDR_MAGIC_NO	0x33fc

#define BAM_MUX_HDR_CMD_DATA		0
#define BAM_MUX_HDR_CMD_OPEN		1
#define BAM_MUX_HDR_CMD_CLOSE		2
#define BAM_MUX_HDR_CMD_STATUS		3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC	4

#define POLLING_MIN_SLEEP	950	/* 0.95 ms */
#define POLLING_MAX_SLEEP	1050	/* 1.05 ms */
#define POLLING_INACTIVITY	40	/* cycles before switch to intr mode */
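/*
 * Back-of-envelope (assuming the min/max sleep values above): 40 idle
 * cycles at roughly 1 ms each means about 40 ms without rx traffic
 * before the driver drops out of polling and re-arms the rx interrupt.
 */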

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4
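/*
 * Illustrative watermark behavior (see msm_bam_dmux_write() and
 * msm_bam_dmux_is_ch_low() below): once a channel opts into watermarks,
 * writes fail with -EAGAIN while HIGH_WATERMARK (4) or more tx packets
 * are in flight, and msm_bam_dmux_is_ch_low() reports true once the
 * in-flight count drains to LOW_WATERMARK (2) or fewer.
 */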

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {						\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		bam_dmux_write_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n",\
				 __func__, bam_dmux_write_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		bam_dmux_write_cpy_bytes += (x);			\
		bam_dmux_write_cpy_cnt++;				\
		if (msm_bam_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	\
				 bam_dmux_write_cpy_bytes);		\
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
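/*
 * Worked example of the on-wire framing above (illustrative numbers): a
 * 5-byte data payload on channel 3 is sent as this 8-byte header with
 * magic_num = BAM_MUX_HDR_MAGIC_NO, cmd = BAM_MUX_HDR_CMD_DATA,
 * ch_id = 3, pkt_len = 5 and pad_len = 3, followed by the payload padded
 * to the next 4-byte boundary: 8 + 5 + 3 = 16 bytes total.
 */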

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK	0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * The complete size of each message is limited to LOG_MESSAGE_MAX_SIZE.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	unsigned long flags;
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 * #: >=1 On-demand uplink vote
	 * D: 1 = Disconnect ACK active
	 */
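	/*
	 * Example (hypothetical values): a prefix of "drPA vUwa1D" decodes
	 * per the legend above as power collapse enabled, no global reset,
	 * BAM powered and ready; no uplink vote, uplink active, not waiting
	 * for an ack, ack not yet received, one on-demand vote, and
	 * disconnect ACK active.
	 */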
	len += scnprintf(buff, sizeof(buff),
			"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
			(unsigned)t_now, nanosec_rem,
			a2_pc_disabled ? 'D' : 'd',
			in_global_reset ? 'R' : 'r',
			bam_dmux_power_state ? 'P' : 'p',
			bam_connection_is_active ? 'A' : 'a',
			bam_dmux_uplink_vote ? 'V' : 'v',
			bam_is_connected ? 'U' : 'u',
			wait_for_ack ? 'W' : 'w',
			ul_wakeup_ack_completion.done ? 'A' : 'a',
			atomic_read(&ul_ondemand_vote),
			disconnect_ack ? 'D' : 'd'
			);

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
		char junk[LOG_MESSAGE_MAX_SIZE];
		int ret;

		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
		if (ret != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: unable to empty log %d\n", __func__, ret);
			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
					flags);
			return;
		}
	}
	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
		if (!info) {
			pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
		if (info->skb == NULL) {
			DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);

		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
					BUFFER_SIZE, info,
					SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);

		if (ret) {
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);
			goto fail_transfer;
		}
	}
	return;

fail_transfer:
	mutex_lock(&bam_rx_pool_mutexlock);
	list_del(&info->list_node);
	--bam_rx_pool_len;
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
			DMA_FROM_DEVICE);

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0) {
		DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
		in_global_reset = 1;
	}
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		bam_dmux_log("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			bam_dmux_log("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			bam_dmux_log("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
			&errant_pkt->list_node, errant_pkt->ts_sec,
			errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have enough tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() plus memcpy() is probably more
		   efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}
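/*
 * Illustrative client usage: a minimal sketch of how a channel client
 * might drive the API above. The channel id (0) and the helper names are
 * hypothetical; this block is not part of the driver and is compiled out.
 */
#if 0
static void example_notify(void *priv, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
	case BAM_DMUX_WRITE_DONE:
		/* for both events, data carries a struct sk_buff * that
		 * the client now owns and must eventually free */
		dev_kfree_skb_any((struct sk_buff *)data);
		break;
	}
}

static int example_open_and_send(struct sk_buff *skb)
{
	int rc = msm_bam_dmux_open(0, NULL, example_notify);

	if (rc)
		return rc;
	return msm_bam_dmux_write(0, skb);
}
#endif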

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}
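/*
 * Illustrative flow-control pattern (a sketch, not part of this driver):
 * calling either helper above opts the channel into watermarks, so a
 * client should stop submitting when the channel reports full and resume
 * once it drains low:
 *
 *	if (msm_bam_dmux_is_ch_full(id) > 0)
 *		stop_tx_queue();	// hypothetical client helper
 *	...
 *	if (msm_bam_dmux_is_ch_low(id) > 0)
 *		wake_tx_queue();	// hypothetical client helper
 */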

static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		if (info->dma_address != iov.addr)
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)info->dma_address, (void *)iov.addr);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			--bam_rx_pool_len;
			list_del(&info->list_node);
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles == POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
	}
}

static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			/*
			 * run on core 0 so that netif_rx() in rmnet uses only
			 * one queue
			 */
			queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
1213 notify->event_id);
1214 }
1215}
1216
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001217#ifdef CONFIG_DEBUG_FS
1218
1219static int debug_tbl(char *buf, int max)
1220{
1221 int i = 0;
1222 int j;
1223
1224 for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
1225 i += scnprintf(buf + i, max - i,
1226 "ch%02d local open=%s remote open=%s\n",
1227 j, bam_ch_is_local_open(j) ? "Y" : "N",
1228 bam_ch_is_remote_open(j) ? "Y" : "N");
1229 }
1230
1231 return i;
1232}
1233
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001234static int debug_ul_pkt_cnt(char *buf, int max)
1235{
1236 struct list_head *p;
1237 unsigned long flags;
1238 int n = 0;
1239
1240 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
1241 __list_for_each(p, &bam_tx_pool) {
1242 ++n;
1243 }
1244 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
1245
1246 return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
1247}
1248
1249static int debug_stats(char *buf, int max)
1250{
1251 int i = 0;
1252
1253 i += scnprintf(buf + i, max - i,
Eric Holmberg9fdef262012-02-14 11:46:05 -07001254 "skb read cnt: %u\n"
1255 "skb write cnt: %u\n"
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001256 "skb copy cnt: %u\n"
1257 "skb copy bytes: %u\n"
Eric Holmberg6074aba2012-01-18 17:59:44 -07001258 "sps tx failures: %u\n"
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001259 "sps tx stalls: %u\n"
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001260 "rx queue len: %d\n"
1261 "a2 ack out cnt: %d\n"
1262 "a2 ack in cnt: %d\n"
1263 "a2 pwr cntl in: %d\n",
Eric Holmberg9fdef262012-02-14 11:46:05 -07001264 bam_dmux_read_cnt,
1265 bam_dmux_write_cnt,
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001266 bam_dmux_write_cpy_cnt,
1267 bam_dmux_write_cpy_bytes,
Eric Holmberg6074aba2012-01-18 17:59:44 -07001268 bam_dmux_tx_sps_failure_cnt,
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001269 bam_dmux_tx_stall_cnt,
Eric Holmberg1f1255d2012-02-22 13:37:21 -07001270 bam_rx_pool_len,
1271 atomic_read(&bam_dmux_ack_out_cnt),
1272 atomic_read(&bam_dmux_ack_in_cnt),
1273 atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07001274 );
1275
1276 return i;
1277}
1278
Eric Holmberg878923a2012-01-10 14:28:19 -07001279static int debug_log(char *buff, int max, loff_t *ppos)
1280{
1281 unsigned long flags;
1282 int i = 0;
1283
1284 if (bam_dmux_state_logging_disabled) {
		i += scnprintf(buff + i, max - i, "Logging disabled\n");
		return i;
	}

	if (*ppos == 0) {
		i += scnprintf(buff + i, max - i,
1291 "<DMUX> timestamp FLAGS [Message]\n"
1292 "FLAGS:\n"
Eric Holmberg006057d2012-01-11 10:10:42 -07001293 "\tD: 1 = Power collapse disabled\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001294 "\tR: 1 = in global reset\n"
1295 "\tP: 1 = BAM is powered up\n"
1296 "\tA: 1 = BAM initialized and ready for data\n"
1297 "\n"
1298 "\tV: 1 = Uplink vote for power\n"
1299 "\tU: 1 = Uplink active\n"
1300 "\tW: 1 = Uplink Wait-for-ack\n"
1301 "\tA: 1 = Uplink ACK received\n"
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001302 "\t#: >=1 On-demand uplink vote\n"
Jeff Hugo0b13a352012-03-17 23:18:30 -06001303 "\tD: 1 = Disconnect ACK active\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001304 );
1305 buff += i;
1306 }
1307
1308 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
1309 while (kfifo_len(&bam_dmux_state_log)
1310 && (i + LOG_MESSAGE_MAX_SIZE) < max) {
1311 int k_len;
1312 k_len = kfifo_out(&bam_dmux_state_log,
1313 buff, LOG_MESSAGE_MAX_SIZE);
1314 if (k_len != LOG_MESSAGE_MAX_SIZE) {
1315 pr_err("%s: retrieve failure %d\n", __func__, k_len);
1316 break;
1317 }
1318
1319 /* keep non-null portion of string and add line break */
1320 k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
1321 buff += k_len;
1322 i += k_len;
1323 if (k_len && *(buff - 1) != '\n') {
1324 *buff++ = '\n';
1325 ++i;
1326 }
1327 }
1328 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
1329
1330 return i;
1331}
1332
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001333#define DEBUG_BUFMAX 4096
1334static char debug_buffer[DEBUG_BUFMAX];
1335
1336static ssize_t debug_read(struct file *file, char __user *buf,
1337 size_t count, loff_t *ppos)
1338{
1339 int (*fill)(char *buf, int max) = file->private_data;
1340 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1341 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1342}
1343
Eric Holmberg878923a2012-01-10 14:28:19 -07001344static ssize_t debug_read_multiple(struct file *file, char __user *buff,
1345 size_t count, loff_t *ppos)
1346{
1347 int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
1348 char *buffer;
1349 int bsize;
1350
1351 buffer = kmalloc(count, GFP_KERNEL);
1352 if (!buffer)
1353 return -ENOMEM;
1354
1355 bsize = util_func(buffer, count, ppos);
1356
1357 if (bsize >= 0) {
1358 if (copy_to_user(buff, buffer, bsize)) {
1359 kfree(buffer);
1360 return -EFAULT;
1361 }
1362 *ppos += bsize;
1363 }
1364 kfree(buffer);
1365 return bsize;
1366}
1367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368static int debug_open(struct inode *inode, struct file *file)
1369{
1370 file->private_data = inode->i_private;
1371 return 0;
1372}
1373
1374
1375static const struct file_operations debug_ops = {
1376 .read = debug_read,
1377 .open = debug_open,
1378};
1379
Eric Holmberg878923a2012-01-10 14:28:19 -07001380static const struct file_operations debug_ops_multiple = {
1381 .read = debug_read_multiple,
1382 .open = debug_open,
1383};
1384
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001385static void debug_create(const char *name, mode_t mode,
1386 struct dentry *dent,
1387 int (*fill)(char *buf, int max))
1388{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001389 struct dentry *file;
1390
1391 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1392 if (IS_ERR(file))
1393 pr_err("%s: debugfs create failed %d\n", __func__,
1394 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001395}
1396
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001397static void debug_create_multiple(const char *name, mode_t mode,
1398 struct dentry *dent,
1399 int (*fill)(char *buf, int max, loff_t *ppos))
1400{
1401 struct dentry *file;
1402
1403 file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
1404 if (IS_ERR(file))
1405 pr_err("%s: debugfs create failed %d\n", __func__,
1406 (int)PTR_ERR(file));
1407}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001408#endif
1409
Jeff Hugod98b1082011-10-24 10:30:23 -06001410static void notify_all(int event, unsigned long data)
1411{
1412 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001413 struct list_head *temp;
1414 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001415
1416 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001417 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001418 bam_ch[i].notify(bam_ch[i].priv, event, data);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001419 bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
1420 __func__, i, event, data);
1421 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001422 }
Jeff Hugocb798022012-04-09 14:55:40 -06001423
1424 __list_for_each(temp, &bam_other_notify_funcs) {
1425 func = container_of(temp, struct outside_notify_func,
1426 list_node);
1427 func->notify(func->priv, event, data);
1428 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001429}
1430
1431static void kickoff_ul_wakeup_func(struct work_struct *work)
1432{
1433 read_lock(&ul_wakeup_lock);
1434 if (!bam_is_connected) {
1435 read_unlock(&ul_wakeup_lock);
1436 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001437 if (unlikely(in_global_reset == 1))
1438 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001439 read_lock(&ul_wakeup_lock);
1440 ul_packet_written = 1;
1441 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1442 }
1443 read_unlock(&ul_wakeup_lock);
1444}
1445
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001446int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001447{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001448 int is_connected;
1449
1450 read_lock(&ul_wakeup_lock);
1451 ul_packet_written = 1;
1452 is_connected = bam_is_connected;
1453 if (!is_connected)
1454 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1455 read_unlock(&ul_wakeup_lock);
1456
1457 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001458}
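
/*
 * Caller sketch (hypothetical client code): the return value tells the
 * caller whether the uplink is already up or a wakeup was just queued:
 *
 *	if (msm_bam_dmux_kickoff_ul_wakeup())
 *		rc = msm_bam_dmux_write(id, skb);
 *	else
 *		... defer the write until BAM_DMUX_UL_CONNECTED ...
 *
 * msm_bam_dmux_write() is this driver's TX entry point, defined
 * elsewhere in the file.
 */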
1459
Eric Holmberg878923a2012-01-10 14:28:19 -07001460static void power_vote(int vote)
1461{
1462 bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
1463 bam_dmux_uplink_vote, vote);
1464
1465 if (bam_dmux_uplink_vote == vote)
1466 bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
1467
1468 bam_dmux_uplink_vote = vote;
1469 if (vote)
1470 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1471 else
1472 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1473}
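
/*
 * The SMSM leg of the uplink power handshake, as implemented here and
 * in bam_dmux_smsm_ack_cb() below:
 *
 *	power_vote(1): set SMSM_A2_POWER_CONTROL in SMSM_APPS_STATE
 *	modem ack:     change in SMSM_A2_POWER_CONTROL_ACK completes
 *	               ul_wakeup_ack_completion
 *	power_vote(0): clear SMSM_A2_POWER_CONTROL
 *
 * Duplicate votes are logged as warnings but still pushed to SMSM.
 */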
1474
Eric Holmberg454d9da2012-01-12 09:37:14 -07001475/*
1476 * @note: Must be called with ul_wakeup_lock locked.
1477 */
1478static inline void ul_powerdown(void)
1479{
1480 bam_dmux_log("%s: powerdown\n", __func__);
1481 verify_tx_queue_is_empty(__func__);
1482
1483 if (a2_pc_disabled) {
1484 wait_for_dfab = 1;
1485 INIT_COMPLETION(dfab_unvote_completion);
1486 release_wakelock();
1487 } else {
1488 wait_for_ack = 1;
1489 INIT_COMPLETION(ul_wakeup_ack_completion);
1490 power_vote(0);
1491 }
1492 bam_is_connected = 0;
1493 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1494}
1495
1496static inline void ul_powerdown_finish(void)
1497{
1498 if (a2_pc_disabled && wait_for_dfab) {
1499 unvote_dfab();
1500 complete_all(&dfab_unvote_completion);
1501 wait_for_dfab = 0;
1502 }
1503}
1504
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001505/*
1506 * Votes for UL power and returns current power state.
1507 *
1508 * @returns true if currently connected
1509 */
1510int msm_bam_dmux_ul_power_vote(void)
1511{
1512 int is_connected;
1513
1514 read_lock(&ul_wakeup_lock);
1515 atomic_inc(&ul_ondemand_vote);
1516 is_connected = bam_is_connected;
1517 if (!is_connected)
1518 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1519 read_unlock(&ul_wakeup_lock);
1520
1521 return is_connected;
1522}
1523
1524/*
1525 * Unvotes for UL power.
1526 *
1527 * @returns true if vote count is 0 (UL shutdown possible)
1528 */
1529int msm_bam_dmux_ul_power_unvote(void)
1530{
1531 int vote;
1532
1533 read_lock(&ul_wakeup_lock);
1534 vote = atomic_dec_return(&ul_ondemand_vote);
1535 	if (unlikely(vote < 0))
1536 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1537 read_unlock(&ul_wakeup_lock);
1538
1539 return vote == 0;
1540}
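
/*
 * Pairing sketch (hypothetical client code): votes are reference
 * counted via ul_ondemand_vote, so every vote needs exactly one
 * matching unvote once the on-demand transfer finishes:
 *
 *	msm_bam_dmux_ul_power_vote();
 *	... perform uplink traffic ...
 *	msm_bam_dmux_ul_power_unvote();
 *
 * An unmatched unvote drives the count negative and trips the
 * "invalid power vote" error above.
 */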
1541
Jeff Hugocb798022012-04-09 14:55:40 -06001542int msm_bam_dmux_reg_notify(void *priv,
1543 void (*notify)(void *priv, int event_type,
1544 unsigned long data))
1545{
1546 struct outside_notify_func *func;
1547
1548 if (!notify)
1549 return -EINVAL;
1550
1551 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1552 if (!func)
1553 return -ENOMEM;
1554
1555 func->notify = notify;
1556 func->priv = priv;
1557 list_add(&func->list_node, &bam_other_notify_funcs);
1558
1559 return 0;
1560}
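
/*
 * Registration sketch (hypothetical callback, not part of this file):
 *
 *	static void my_bam_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			... resume deferred uplink writes ...
 *	}
 *
 *	rc = msm_bam_dmux_reg_notify(my_ctx, my_bam_notify);
 *
 * Registered functions are invoked from notify_all() alongside the
 * per-channel notify callbacks.
 */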
1561
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001562static void ul_timeout(struct work_struct *work)
1563{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001564 unsigned long flags;
1565 int ret;
1566
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001567 if (in_global_reset)
1568 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001569 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1570 if (!ret) { /* failed to grab lock, reschedule and bail */
1571 schedule_delayed_work(&ul_timeout_work,
1572 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1573 return;
1574 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001575 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001576 if (!ul_packet_written) {
1577 spin_lock(&bam_tx_pool_spinlock);
1578 if (!list_empty(&bam_tx_pool)) {
1579 struct tx_pkt_info *info;
1580
1581 info = list_first_entry(&bam_tx_pool,
1582 struct tx_pkt_info, list_node);
1583 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1584 __func__, info->ts_sec, info->ts_nsec);
1585 DBG_INC_TX_STALL_CNT();
1586 ul_packet_written = 1;
1587 }
1588 spin_unlock(&bam_tx_pool_spinlock);
1589 }
1590
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001591 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
1592 bam_dmux_log("%s: pkt written %d\n",
1593 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001594 ul_packet_written = 0;
1595 schedule_delayed_work(&ul_timeout_work,
1596 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001597 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001598 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001599 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001600 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001601 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001602 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001603}
Jeff Hugo4838f412012-01-20 11:19:37 -07001604
1605static int ssrestart_check(void)
1606{
Eric Holmberg90285e22012-02-22 12:33:05 -07001607 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1608 in_global_reset = 1;
1609 if (get_restart_level() <= RESET_SOC)
1610 DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
1611 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001612}
1613
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001614static void ul_wakeup(void)
1615{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001616 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001617 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001618
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001619 mutex_lock(&wakeup_lock);
1620 if (bam_is_connected) { /* bam got connected before lock grabbed */
Eric Holmberg878923a2012-01-10 14:28:19 -07001621 bam_dmux_log("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001622 mutex_unlock(&wakeup_lock);
1623 return;
1624 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001625
Jeff Hugoc2696142012-05-03 11:42:13 -06001626 /*
1627 	 * If someone votes for UL before the BAM is initialized (modem up
1628 	 * first time), set a flag so init kicks off UL wakeup once BAM is ready.
1629 */
1630 mutex_lock(&delayed_ul_vote_lock);
1631 if (unlikely(!bam_mux_initialized)) {
1632 need_delayed_ul_vote = 1;
1633 mutex_unlock(&delayed_ul_vote_lock);
1634 mutex_unlock(&wakeup_lock);
1635 return;
1636 }
1637 mutex_unlock(&delayed_ul_vote_lock);
1638
Eric Holmberg006057d2012-01-11 10:10:42 -07001639 if (a2_pc_disabled) {
1640 /*
1641 * don't grab the wakelock the first time because it is
1642 * already grabbed when a2 powers on
1643 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001644 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001645 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001646 do_vote_dfab = 1; /* vote must occur after wait */
1647 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001648 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001649 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001650 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001651 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001652 &dfab_unvote_completion, HZ);
1653 BUG_ON(ret == 0);
1654 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001655 if (likely(do_vote_dfab))
1656 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001657 schedule_delayed_work(&ul_timeout_work,
1658 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1659 bam_is_connected = 1;
1660 mutex_unlock(&wakeup_lock);
1661 return;
1662 }
1663
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001664 /*
1665 	 * Must wait for the previous power-down request to be acked. Chances
1666 	 * are the ack already came in, in which case this falls through
1667 	 * instead of waiting.
1668 */
1669 if (wait_for_ack) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001670 bam_dmux_log("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001671 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001672 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001673 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001674 if (unlikely(ret == 0) && ssrestart_check()) {
1675 mutex_unlock(&wakeup_lock);
1676 bam_dmux_log("%s timeout previous ack\n", __func__);
1677 return;
1678 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001679 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001680 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001681 power_vote(1);
1682 bam_dmux_log("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001683 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001684 if (unlikely(ret == 0) && ssrestart_check()) {
1685 mutex_unlock(&wakeup_lock);
1686 bam_dmux_log("%s timeout wakeup ack\n", __func__);
1687 return;
1688 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001689 bam_dmux_log("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001690 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001691 if (unlikely(ret == 0) && ssrestart_check()) {
1692 mutex_unlock(&wakeup_lock);
1693 bam_dmux_log("%s timeout power on\n", __func__);
1694 return;
1695 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001696
1697 bam_is_connected = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -07001698 bam_dmux_log("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001699 schedule_delayed_work(&ul_timeout_work,
1700 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1701 mutex_unlock(&wakeup_lock);
1702}
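
/*
 * Normal (power-collapse enabled) wakeup sequence, summarizing the
 * function above:
 *
 *	1. wait for any previous power-down to be acked (wait_for_ack)
 *	2. power_vote(1) to request A2 power over SMSM
 *	3. wait up to HZ for ul_wakeup_ack_completion (modem ack)
 *	4. wait up to HZ for bam_connection_completion (pipes reconnected)
 *	5. set bam_is_connected and arm ul_timeout_work
 *
 * Any timeout flags a global reset through ssrestart_check().
 */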
1703
1704static void reconnect_to_bam(void)
1705{
1706 int i;
1707
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001708 in_global_reset = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001709 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001710 if (!power_management_only_mode) {
1711 i = sps_device_reset(a2_device_handle);
1712 if (i)
1713 pr_err("%s: device reset failed rc = %d\n", __func__,
1714 i);
1715 i = sps_connect(bam_tx_pipe, &tx_connection);
1716 if (i)
1717 pr_err("%s: tx connection failed rc = %d\n", __func__,
1718 i);
1719 i = sps_connect(bam_rx_pipe, &rx_connection);
1720 if (i)
1721 pr_err("%s: rx connection failed rc = %d\n", __func__,
1722 i);
1723 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1724 if (i)
1725 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1726 i);
1727 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1728 if (i)
1729 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1730 i);
1731 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001732
1733 bam_connection_is_active = 1;
1734
1735 if (polling_mode)
1736 rx_switch_to_interrupt_mode();
1737
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001738 toggle_apps_ack();
1739 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001740 if (!power_management_only_mode)
1741 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001742}
1743
1744static void disconnect_to_bam(void)
1745{
1746 struct list_head *node;
1747 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001748 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001749
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001750 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001751
1752 /* handle disconnect during active UL */
1753 write_lock_irqsave(&ul_wakeup_lock, flags);
1754 if (bam_is_connected) {
1755 bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
1756 ul_powerdown();
1757 }
1758 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1759 ul_powerdown_finish();
1760
1761 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001762 INIT_COMPLETION(bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001763 if (!power_management_only_mode) {
1764 sps_disconnect(bam_tx_pipe);
1765 sps_disconnect(bam_rx_pipe);
1766 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1767 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1768 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001769 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001770
1771 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001772 while (!list_empty(&bam_rx_pool)) {
1773 node = bam_rx_pool.next;
1774 list_del(node);
1775 info = container_of(node, struct rx_pkt_info, list_node);
1776 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1777 DMA_FROM_DEVICE);
1778 dev_kfree_skb_any(info->skb);
1779 kfree(info);
1780 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001781 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001782 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001783
Jeff Hugo0b13a352012-03-17 23:18:30 -06001784 if (disconnect_ack)
1785 toggle_apps_ack();
1786
Eric Holmberg878923a2012-01-10 14:28:19 -07001787 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001788}
1789
1790static void vote_dfab(void)
1791{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001792 int rc;
1793
Eric Holmberg006057d2012-01-11 10:10:42 -07001794 bam_dmux_log("%s\n", __func__);
1795 mutex_lock(&dfab_status_lock);
1796 if (dfab_is_on) {
1797 bam_dmux_log("%s: dfab is already on\n", __func__);
1798 mutex_unlock(&dfab_status_lock);
1799 return;
1800 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001801 rc = clk_prepare_enable(dfab_clk);
Jeff Hugoca0caa82011-12-05 16:05:23 -07001802 if (rc)
Eric Holmberg006057d2012-01-11 10:10:42 -07001803 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n", rc);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001804 rc = clk_prepare_enable(xo_clk);
1805 if (rc)
1806 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n", rc);
Eric Holmberg006057d2012-01-11 10:10:42 -07001807 dfab_is_on = 1;
1808 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001809}
1810
1811static void unvote_dfab(void)
1812{
Eric Holmberg006057d2012-01-11 10:10:42 -07001813 bam_dmux_log("%s\n", __func__);
1814 mutex_lock(&dfab_status_lock);
1815 if (!dfab_is_on) {
1816 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1817 dump_stack();
1818 mutex_unlock(&dfab_status_lock);
1819 return;
1820 }
Jeff Hugo23a812b2012-01-13 13:43:42 -07001821 clk_disable_unprepare(dfab_clk);
Stephen Boyd69d35e32012-02-14 15:33:30 -08001822 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001823 dfab_is_on = 0;
1824 mutex_unlock(&dfab_status_lock);
1825}
1826
1827/* reference counting wrapper around wakelock */
1828static void grab_wakelock(void)
1829{
1830 unsigned long flags;
1831
1832 spin_lock_irqsave(&wakelock_reference_lock, flags);
1833 bam_dmux_log("%s: ref count = %d\n", __func__,
1834 wakelock_reference_count);
1835 if (wakelock_reference_count == 0)
1836 wake_lock(&bam_wakelock);
1837 ++wakelock_reference_count;
1838 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1839}
1840
1841static void release_wakelock(void)
1842{
1843 unsigned long flags;
1844
1845 spin_lock_irqsave(&wakelock_reference_lock, flags);
1846 if (wakelock_reference_count == 0) {
1847 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1848 dump_stack();
1849 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1850 return;
1851 }
1852 bam_dmux_log("%s: ref count = %d\n", __func__,
1853 wakelock_reference_count);
1854 --wakelock_reference_count;
1855 if (wakelock_reference_count == 0)
1856 wake_unlock(&bam_wakelock);
1857 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001858}
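
/*
 * Pairing sketch: every grab_wakelock() must be balanced by one
 * release_wakelock(). The underlying wakelock is only taken on the
 * 0 -> 1 reference transition and dropped on 1 -> 0, so independent
 * users (e.g. the a2_pc_disabled path and the SMSM power callbacks)
 * can overlap safely.
 */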
1859
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001860static int restart_notifier_cb(struct notifier_block *this,
1861 unsigned long code,
1862 void *data)
1863{
1864 int i;
1865 struct list_head *node;
1866 struct tx_pkt_info *info;
1867 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001868 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001869
1870 if (code != SUBSYS_AFTER_SHUTDOWN)
1871 return NOTIFY_DONE;
1872
Eric Holmberg878923a2012-01-10 14:28:19 -07001873 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001874 in_global_reset = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001875
1876 /* Handle uplink Powerdown */
1877 write_lock_irqsave(&ul_wakeup_lock, flags);
1878 if (bam_is_connected) {
1879 ul_powerdown();
1880 wait_for_ack = 0;
1881 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001882 /*
1883 	 * If the modem crashed during ul_wakeup(), power_vote is still 1 and
1884 	 * must be reset to 0; harmless if the bam_is_connected check above passed.
1885 */
1886 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001887 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1888 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001889 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001890 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugo0b13a352012-03-17 23:18:30 -06001891 disconnect_ack = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001892
1893 /* Cleanup Channel States */
Eric Holmberga623da82012-07-12 09:37:09 -06001894 mutex_lock(&bam_pdev_mutexlock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001895 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1896 temp_remote_status = bam_ch_is_remote_open(i);
1897 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001898 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001899 if (bam_ch_is_local_open(i))
1900 bam_ch[i].status |= BAM_CH_IN_RESET;
1901 if (temp_remote_status) {
1902 platform_device_unregister(bam_ch[i].pdev);
1903 bam_ch[i].pdev = platform_device_alloc(
1904 bam_ch[i].name, 2);
1905 }
1906 }
Eric Holmberga623da82012-07-12 09:37:09 -06001907 mutex_unlock(&bam_pdev_mutexlock);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001908
1909 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07001910 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001911 while (!list_empty(&bam_tx_pool)) {
1912 node = bam_tx_pool.next;
1913 list_del(node);
1914 info = container_of(node, struct tx_pkt_info,
1915 list_node);
1916 if (!info->is_cmd) {
1917 dma_unmap_single(NULL, info->dma_address,
1918 info->skb->len,
1919 DMA_TO_DEVICE);
1920 dev_kfree_skb_any(info->skb);
1921 } else {
1922 dma_unmap_single(NULL, info->dma_address,
1923 info->len,
1924 DMA_TO_DEVICE);
1925 kfree(info->skb);
1926 }
1927 kfree(info);
1928 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07001929 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001930
Eric Holmberg878923a2012-01-10 14:28:19 -07001931 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001932 return NOTIFY_DONE;
1933}
1934
Jeff Hugo9dea05c2011-12-21 12:23:05 -07001935static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001936{
1937 u32 h;
1938 dma_addr_t dma_addr;
1939 int ret;
1940 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07001941 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001942
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001943 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001944 /* init BAM */
1945 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
1946 if (!a2_virt_addr) {
1947 pr_err("%s: ioremap failed\n", __func__);
1948 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07001949 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001950 }
1951 a2_props.phys_addr = A2_PHYS_BASE;
1952 a2_props.virt_addr = a2_virt_addr;
1953 a2_props.virt_size = A2_PHYS_SIZE;
1954 a2_props.irq = A2_BAM_IRQ;
Jeff Hugo927cba62011-11-11 11:49:52 -07001955 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956 a2_props.num_pipes = A2_NUM_PIPES;
1957 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07001958 if (cpu_is_msm9615())
1959 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001960 /* need to free on tear down */
1961 ret = sps_register_bam_device(&a2_props, &h);
1962 if (ret < 0) {
1963 pr_err("%s: register bam error %d\n", __func__, ret);
1964 goto register_bam_failed;
1965 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001966 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001967
1968 bam_tx_pipe = sps_alloc_endpoint();
1969 if (bam_tx_pipe == NULL) {
1970 pr_err("%s: tx alloc endpoint failed\n", __func__);
1971 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001972 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001973 }
1974 ret = sps_get_config(bam_tx_pipe, &tx_connection);
1975 if (ret) {
1976 pr_err("%s: tx get config failed %d\n", __func__, ret);
1977 goto tx_get_config_failed;
1978 }
1979
1980 tx_connection.source = SPS_DEV_HANDLE_MEM;
1981 tx_connection.src_pipe_index = 0;
1982 tx_connection.destination = h;
1983 tx_connection.dest_pipe_index = 4;
1984 tx_connection.mode = SPS_MODE_DEST;
1985 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
1986 tx_desc_mem_buf.size = 0x800; /* 2k */
1987 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
1988 &dma_addr, 0);
1989 if (tx_desc_mem_buf.base == NULL) {
1990 pr_err("%s: tx memory alloc failed\n", __func__);
1991 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07001992 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993 }
1994 tx_desc_mem_buf.phys_base = dma_addr;
1995 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
1996 tx_connection.desc = tx_desc_mem_buf;
1997 tx_connection.event_thresh = 0x10;
1998
1999 ret = sps_connect(bam_tx_pipe, &tx_connection);
2000 if (ret < 0) {
2001 pr_err("%s: tx connect error %d\n", __func__, ret);
2002 goto tx_connect_failed;
2003 }
2004
2005 bam_rx_pipe = sps_alloc_endpoint();
2006 if (bam_rx_pipe == NULL) {
2007 pr_err("%s: rx alloc endpoint failed\n", __func__);
2008 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002009 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002010 }
2011 ret = sps_get_config(bam_rx_pipe, &rx_connection);
2012 if (ret) {
2013 pr_err("%s: rx get config failed %d\n", __func__, ret);
2014 goto rx_get_config_failed;
2015 }
2016
2017 rx_connection.source = h;
2018 rx_connection.src_pipe_index = 5;
2019 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2020 rx_connection.dest_pipe_index = 1;
2021 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002022 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2023 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002024 rx_desc_mem_buf.size = 0x800; /* 2k */
2025 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2026 &dma_addr, 0);
2027 if (rx_desc_mem_buf.base == NULL) {
2028 pr_err("%s: rx memory alloc failed\n", __func__);
2029 ret = -ENOMEM;
2030 goto rx_mem_failed;
2031 }
2032 rx_desc_mem_buf.phys_base = dma_addr;
2033 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2034 rx_connection.desc = rx_desc_mem_buf;
2035 rx_connection.event_thresh = 0x10;
2036
2037 ret = sps_connect(bam_rx_pipe, &rx_connection);
2038 if (ret < 0) {
2039 pr_err("%s: rx connect error %d\n", __func__, ret);
2040 goto rx_connect_failed;
2041 }
2042
2043 tx_register_event.options = SPS_O_EOT;
2044 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2045 tx_register_event.xfer_done = NULL;
2046 tx_register_event.callback = bam_mux_tx_notify;
2047 tx_register_event.user = NULL;
2048 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
2049 if (ret < 0) {
2050 pr_err("%s: tx register event error %d\n", __func__, ret);
2051 goto rx_event_reg_failed;
2052 }
2053
Jeff Hugo33dbc002011-08-25 15:52:53 -06002054 rx_register_event.options = SPS_O_EOT;
2055 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2056 rx_register_event.xfer_done = NULL;
2057 rx_register_event.callback = bam_mux_rx_notify;
2058 rx_register_event.user = NULL;
2059 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
2060 if (ret < 0) {
2061 		pr_err("%s: rx register event error %d\n", __func__, ret);
2062 goto rx_event_reg_failed;
2063 }
2064
Jeff Hugoc2696142012-05-03 11:42:13 -06002065 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002066 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002067 if (need_delayed_ul_vote) {
2068 need_delayed_ul_vote = 0;
2069 msm_bam_dmux_kickoff_ul_wakeup();
2070 }
2071 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002072 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002073 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002074 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002075 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002076 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002077
2078rx_event_reg_failed:
2079 sps_disconnect(bam_rx_pipe);
2080rx_connect_failed:
2081 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2082 rx_desc_mem_buf.phys_base);
2083rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002084rx_get_config_failed:
2085 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002086rx_alloc_endpoint_failed:
2087 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002088tx_connect_failed:
2089 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2090 tx_desc_mem_buf.phys_base);
2091tx_get_config_failed:
2092 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002093tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002094 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002095 /*
2096 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2097 * same handle below will cause a crash, so skip it if we've freed
2098 * the handle here.
2099 */
2100 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002101register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002102 if (!skip_iounmap)
2103 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002104ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002106 return ret;
2107}
2108
2109static int bam_init_fallback(void)
2110{
2111 u32 h;
2112 int ret;
2113 void *a2_virt_addr;
2114
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002115 /* init BAM */
2116 a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
2117 if (!a2_virt_addr) {
2118 pr_err("%s: ioremap failed\n", __func__);
2119 ret = -ENOMEM;
2120 goto ioremap_failed;
2121 }
2122 a2_props.phys_addr = A2_PHYS_BASE;
2123 a2_props.virt_addr = a2_virt_addr;
2124 a2_props.virt_size = A2_PHYS_SIZE;
2125 a2_props.irq = A2_BAM_IRQ;
2126 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2127 a2_props.num_pipes = A2_NUM_PIPES;
2128 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2129 if (cpu_is_msm9615())
2130 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2131 ret = sps_register_bam_device(&a2_props, &h);
2132 if (ret < 0) {
2133 pr_err("%s: register bam error %d\n", __func__, ret);
2134 goto register_bam_failed;
2135 }
2136 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002137
2138 mutex_lock(&delayed_ul_vote_lock);
2139 bam_mux_initialized = 1;
2140 if (need_delayed_ul_vote) {
2141 need_delayed_ul_vote = 0;
2142 msm_bam_dmux_kickoff_ul_wakeup();
2143 }
2144 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002145 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002146
Jeff Hugo18792a32012-06-20 15:25:55 -06002147 power_management_only_mode = 1;
2148 bam_connection_is_active = 1;
2149 complete_all(&bam_connection_completion);
2150
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002151 return 0;
2152
2153register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002154 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002155ioremap_failed:
2156 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002157}
Jeff Hugoade1f842011-08-03 15:53:59 -06002158
Jeff Hugoa670b762012-03-15 15:58:28 -06002159static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002160{
2161 int ret = 0;
2162
2163 ret = bam_init();
2164 if (ret) {
2165 ret = bam_init_fallback();
2166 if (ret)
2167 			pr_err("%s: bam init fallback failed: %d\n",
2168 __func__, ret);
2169 }
2170}
2171
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002172static void toggle_apps_ack(void)
2173{
2174 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002175
2176 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
2177 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002178 smsm_change_state(SMSM_APPS_STATE,
2179 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2180 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2181 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002182 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002183}
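
/*
 * The ack is edge-based: each call flips SMSM_A2_POWER_CONTROL_ACK, so
 * the modem sees one transition per acknowledged power-control change.
 * reconnect_to_bam(), disconnect_to_bam() (when disconnect_ack is set)
 * and the init paths therefore call this exactly once per event.
 */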
2184
Jeff Hugoade1f842011-08-03 15:53:59 -06002185static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2186{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002187 static int last_processed_state;
2188
2189 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002190 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002191 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002192 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2193 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002194 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
2195 bam_dmux_log("%s: already processed this state\n", __func__);
2196 mutex_unlock(&smsm_cb_lock);
2197 return;
2198 }
2199
2200 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002201
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002202 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002203 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002204 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002205 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002206 } else if (bam_mux_initialized &&
2207 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002208 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002209 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002210 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002211 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002212 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002213 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002214 if (cpu_is_msm9615())
2215 msm9615_bam_init();
2216 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002217 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002218 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07002219 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002220 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002221 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002222 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002223
2224}
2225
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002226static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2227 uint32_t new_state)
2228{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002229 DBG_INC_ACK_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002230 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2231 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002232 complete_all(&ul_wakeup_ack_completion);
2233}
2234
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002235static int bam_dmux_probe(struct platform_device *pdev)
2236{
2237 int rc;
2238
2239 DBG("%s probe called\n", __func__);
2240 if (bam_mux_initialized)
2241 return 0;
2242
Stephen Boyd69d35e32012-02-14 15:33:30 -08002243 xo_clk = clk_get(&pdev->dev, "xo");
2244 if (IS_ERR(xo_clk)) {
2245 pr_err("%s: did not get xo clock\n", __func__);
2246 return PTR_ERR(xo_clk);
2247 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002248 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002249 if (IS_ERR(dfab_clk)) {
2250 		pr_err("%s: did not get dfab clock\n", __func__);
2251 		clk_put(xo_clk);
2252 		return PTR_ERR(dfab_clk);
2252 }
2253
2254 rc = clk_set_rate(dfab_clk, 64000000);
2255 if (rc)
2256 pr_err("%s: unable to set dfab clock rate\n", __func__);
2257
Jeff Hugofff43af92012-03-29 17:54:52 -06002258 /*
2259 	 * Set up the workqueue so it can be pinned to core 0 and not block
2260 	 * the watchdog pet function, and so that netif_rx() in rmnet only
2261 	 * uses one queue.
2262 */
2263 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2264 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002265 if (!bam_mux_rx_workqueue)
2266 return -ENOMEM;
2267
2268 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2269 if (!bam_mux_tx_workqueue) {
2270 destroy_workqueue(bam_mux_rx_workqueue);
2271 return -ENOMEM;
2272 }
2273
Jeff Hugo7960abd2011-08-02 15:39:38 -06002274 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002276 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2277 "bam_dmux_ch_%d", rc);
2278 /* bus 2, ie a2 stream 2 */
2279 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2280 if (!bam_ch[rc].pdev) {
2281 pr_err("%s: platform device alloc failed\n", __func__);
2282 destroy_workqueue(bam_mux_rx_workqueue);
2283 destroy_workqueue(bam_mux_tx_workqueue);
2284 return -ENOMEM;
2285 }
2286 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002288 init_completion(&ul_wakeup_ack_completion);
2289 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002290 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002291 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002292 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002293
Jeff Hugoade1f842011-08-03 15:53:59 -06002294 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2295 bam_dmux_smsm_cb, NULL);
2296
2297 if (rc) {
2298 destroy_workqueue(bam_mux_rx_workqueue);
2299 destroy_workqueue(bam_mux_tx_workqueue);
2300 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2301 return -ENOMEM;
2302 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002303
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002304 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2305 bam_dmux_smsm_ack_cb, NULL);
2306
2307 if (rc) {
2308 destroy_workqueue(bam_mux_rx_workqueue);
2309 destroy_workqueue(bam_mux_tx_workqueue);
2310 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2311 SMSM_A2_POWER_CONTROL,
2312 bam_dmux_smsm_cb, NULL);
2313 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2314 rc);
2315 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2316 platform_device_put(bam_ch[rc].pdev);
2317 return -ENOMEM;
2318 }
2319
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002320 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2321 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2322
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002323 return 0;
2324}
2325
2326static struct platform_driver bam_dmux_driver = {
2327 .probe = bam_dmux_probe,
2328 .driver = {
2329 .name = "BAM_RMNT",
2330 .owner = THIS_MODULE,
2331 },
2332};
2333
2334static int __init bam_dmux_init(void)
2335{
Eric Holmberg878923a2012-01-10 14:28:19 -07002336 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002337#ifdef CONFIG_DEBUG_FS
2338 struct dentry *dent;
2339
2340 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002341 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002342 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002343 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2344 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberge4ac80b2012-01-12 09:21:59 -07002345 debug_create_multiple("log", 0444, dent, debug_log);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002346 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002347#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07002348 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
2349 if (ret) {
2350 pr_err("%s: failed to allocate log %d\n", __func__, ret);
2351 bam_dmux_state_logging_disabled = 1;
2352 }
2353
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002354 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002355 return platform_driver_register(&bam_dmux_driver);
2356}
2357
Jeff Hugoade1f842011-08-03 15:53:59 -06002358late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002359MODULE_DESCRIPTION("MSM BAM DMUX");
2360MODULE_LICENSE("GPL v2");