/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>
#include <linux/of.h>

#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN       0x1
#define BAM_CH_REMOTE_OPEN      0x2
#define BAM_CH_IN_RESET         0x4

#define BAM_MUX_HDR_MAGIC_NO    0x33fc

#define BAM_MUX_HDR_CMD_DATA            0
#define BAM_MUX_HDR_CMD_OPEN            1
#define BAM_MUX_HDR_CMD_CLOSE           2
#define BAM_MUX_HDR_CMD_STATUS          3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC   4


#define LOW_WATERMARK           2
#define HIGH_WATERMARK          4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 1050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 40;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled = 1;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {				\
		if (msm_bam_dmux_debug_enable)	\
			pr_debug(x);		\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				 \
		bam_dmux_write_cnt += (x);			 \
		if (msm_bam_dmux_debug_enable)			 \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	 \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					    \
		bam_dmux_write_cpy_bytes += (x);			    \
		bam_dmux_write_cpy_cnt++;				    \
		if (msm_bam_dmux_debug_enable)				    \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	    \
				 bam_dmux_write_cpy_bytes);		    \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES		6
#define A2_SUMMING_THRESHOLD	4096
#define A2_DEFAULT_DESCRIPTORS	32
#define A2_PHYS_BASE		0x124C2000
#define A2_PHYS_SIZE		0x2000
#define BUFFER_SIZE		2048
#define NUM_BUFFERS		32

#ifndef A2_BAM_IRQ
#define A2_BAM_IRQ -1
#endif

static void *a2_phys_base;
static uint32_t a2_phys_size;
static int a2_bam_irq;
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

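/*
 * On-the-wire MUX header prepended to every packet exchanged with the A2.
 * @magic_num: must be BAM_MUX_HDR_MAGIC_NO; packets with any other value
 *	are dropped
 * @reserved: unused for data; carries option flags on OPEN commands
 *	(e.g. ENABLE_DISCONNECT_ACK)
 * @cmd: one of the BAM_MUX_HDR_CMD_* values
 * @pad_len: bytes of padding appended to round the payload up to a
 *	4-byte multiple
 * @ch_id: logical channel id, index into bam_ch[]
 * @pkt_len: payload length in bytes, excluding header and padding
 */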
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK	0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x)						\
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x)			\
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x)		\
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x)			\
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void bam_dmux_log(const char *fmt, ...)
					__printf(1, 2);


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

/**
 * Log a state change along with a small message.
 *
 * The complete size of each message, including the state prefix, is
 * limited to LOG_MESSAGE_MAX_SIZE bytes.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	unsigned long flags;
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 * #: >=1 On-demand uplink vote
	 * D: 1 = Disconnect ACK active
	 */
	len += scnprintf(buff, sizeof(buff),
		"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
		(unsigned)t_now, nanosec_rem,
		a2_pc_disabled ? 'D' : 'd',
		in_global_reset ? 'R' : 'r',
		bam_dmux_power_state ? 'P' : 'p',
		bam_connection_is_active ? 'A' : 'a',
		bam_dmux_uplink_vote ? 'V' : 'v',
		bam_is_connected ? 'U' : 'u',
		wait_for_ack ? 'W' : 'w',
		ul_wakeup_ack_completion.done ? 'A' : 'a',
		atomic_read(&ul_ondemand_vote),
		disconnect_ack ? 'D' : 'd'
		);

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);

	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
		char junk[LOG_MESSAGE_MAX_SIZE];
		int ret;

		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
		if (ret != LOG_MESSAGE_MAX_SIZE) {
			pr_err("%s: unable to empty log %d\n", __func__, ret);
			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
					flags);
			return;
		}
	}
	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

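/*
 * Replenish the RX buffer pool: allocate skbs, DMA-map them, and queue
 * them on the RX pipe until NUM_BUFFERS descriptors are outstanding.
 * If the pool drains to zero and cannot be refilled, the connection is
 * treated as dead and in_global_reset is set.
 */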
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
		if (!info) {
			pr_err("%s: unable to alloc rx_pkt_info\n", __func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
		if (info->skb == NULL) {
			DMUX_LOG_KERR("%s: unable to alloc skb\n", __func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
					BUFFER_SIZE, info,
					SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						DMA_FROM_DEVICE);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);

	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0) {
		DMUX_LOG_KERR("%s: RX queue failure\n", __func__);
		in_global_reset = 1;
	}
}

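/*
 * Deliver a received data packet to the channel owner. The MUX header is
 * stripped by advancing skb->data, then the skb is handed to the channel's
 * notify() callback (which takes ownership) or freed if no callback is
 * registered. A replacement RX buffer is queued afterwards.
 */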
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

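/*
 * Handle an OPEN (or OPEN_NO_A2_PC) command from the remote side: mark the
 * channel remote-open and register its platform device so clients can probe.
 * bam_pdev_mutexlock serializes this against channel close and SSR teardown.
 */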
static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		bam_dmux_log("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

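/*
 * Work function run for every completed RX buffer. Validates the MUX
 * header (magic number and channel id), then dispatches on the command:
 * DATA packets go to bam_mux_process_data(); OPEN/OPEN_NO_A2_PC and CLOSE
 * update channel state and the corresponding platform device.
 */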
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			bam_dmux_log("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			bam_dmux_log("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

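/*
 * Queue a command packet (e.g. an OPEN or CLOSE header) on the TX pipe.
 * The buffer is DMA-mapped, wrapped in a tx_pkt_info marked is_cmd, added
 * to bam_tx_pool, and handed to sps_transfer_one(); on failure the packet
 * is unwound and the error returned.
 */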
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

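/*
 * TX completion work. Completions must arrive in submission order, so the
 * finished packet is expected to be the head of bam_tx_pool; a mismatch
 * indicates pool corruption and is fatal (BUG). Command buffers are simply
 * freed; data skbs are returned to the owner via BAM_DMUX_WRITE_DONE.
 */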
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				&errant_pkt->list_node, errant_pkt->ts_sec,
				errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

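/*
 * Queue an skb for transmission on a logical channel. Enforces the
 * per-channel HIGH_WATERMARK when the client uses watermarks, wakes the
 * uplink if needed, prepends the MUX header (expanding the skb when there
 * is no tailroom for 4-byte padding), and submits the result to the TX
 * pipe. On success the skb is owned by the driver until WRITE_DONE.
 */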
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy would probably be more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

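/*
 * Open a logical channel from the local side. The channel must already be
 * remote-open (the A2 sent an OPEN command), otherwise -ENODEV is returned.
 * Registers the client's notify() callback, wakes the uplink, and sends an
 * OPEN command to the remote side.
 */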
int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
896 DBG("%s: not inititialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

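/*
 * Close a logical channel from the local side: clear the client callback,
 * drop BAM_CH_LOCAL_OPEN, and send a CLOSE command to the remote side.
 * If the channel is in reset (SSR), only local state is cleared since the
 * remote end is already gone.
 */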
int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	     id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

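/*
 * Leave RX polling mode: re-register for EOT events, re-enable interrupts
 * on the RX pipe, and drain any descriptors that completed while the
 * switch was in progress. If reconfiguration fails, polling resumes by
 * re-queueing rx_timer_work.
 */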
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
				__func__, (void *)iov.addr);
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		if (info->dma_address != iov.addr) {
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)iov.addr,
				(void *)info->dma_address);
			list_for_each_entry(info, &bam_rx_pool, list_node) {
				DMUX_LOG_KERR("%s: dma %p\n", __func__,
					(void *)info->dma_address);
				if (iov.addr == info->dma_address)
					break;
			}
		}
		BUG_ON(info->dma_address != iov.addr);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

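/*
 * RX polling loop, run while interrupts are disabled on the RX pipe.
 * Each pass drains all completed descriptors; after POLLING_INACTIVITY
 * idle passes it switches back to interrupt mode. With the adaptive timer
 * enabled, the sleep interval scales inversely with how many buffers
 * completed during the previous sleep, clamped to
 * [MIN_POLLING_SLEEP, MAX_POLLING_SLEEP] microseconds.
 */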
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	u32 buffs_unused, buffs_used;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				DMUX_LOG_KERR(
					"%s: have iovec %p but rx pool empty\n",
					__func__, (void *)iov.addr);
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			if (info->dma_address != iov.addr) {
				DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
					__func__,
					(void *)iov.addr,
					(void *)info->dma_address);
				list_for_each_entry(info, &bam_rx_pool,
						list_node) {
					DMUX_LOG_KERR("%s: dma %p\n", __func__,
						(void *)info->dma_address);
					if (iov.addr == info->dma_address)
						break;
				}
			}
			BUG_ON(info->dma_address != iov.addr);
			list_del(&info->list_node);
			--bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles >= POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		if (bam_adaptive_timer_enabled) {
			usleep_range(rx_timer_interval,
					rx_timer_interval + 50);

			ret = sps_get_unused_desc_num(bam_rx_pipe,
						&buffs_unused);

			if (ret) {
				pr_err("%s: error getting num buffers unused after sleep\n",
					__func__);

				break;
			}

			buffs_used = NUM_BUFFERS - buffs_unused;

			if (buffs_unused == 0) {
				rx_timer_interval = MIN_POLLING_SLEEP;
			} else {
				if (buffs_used > 0) {
					rx_timer_interval =
						(2 * NUM_BUFFERS *
							rx_timer_interval)/
						(3 * buffs_used);
				} else {
					rx_timer_interval =
						MAX_POLLING_SLEEP;
				}
			}

			if (rx_timer_interval > MAX_POLLING_SLEEP)
				rx_timer_interval = MAX_POLLING_SLEEP;
			else if (rx_timer_interval < MIN_POLLING_SLEEP)
				rx_timer_interval = MIN_POLLING_SLEEP;
		} else {
			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
		}
	}
}

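/*
 * SPS callback for TX pipe events. On EOT, unmap the finished buffer and
 * defer completion handling to the TX workqueue, since the client notify()
 * work may not be safe to run directly in this callback context.
 */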
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

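/*
 * SPS callback for RX pipe events. On an EOT while in interrupt mode,
 * switch the pipe to polling (interrupts off, transfers ACKed manually),
 * grab a wakelock, and kick off rx_timer_work to drain the pipe.
 */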
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			/*
			 * run on core 0 so that netif_rx() in rmnet uses only
			 * one queue
			 */
			queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			"ch%02d local open=%s remote open=%s\n",
			j, bam_ch_is_local_open(j) ? "Y" : "N",
			bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
	struct list_head *p;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	__list_for_each(p, &bam_tx_pool) {
		++n;
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i,
			"skb read cnt: %u\n"
			"skb write cnt: %u\n"
			"skb copy cnt: %u\n"
			"skb copy bytes: %u\n"
			"sps tx failures: %u\n"
			"sps tx stalls: %u\n"
			"rx queue len: %d\n"
			"a2 ack out cnt: %d\n"
			"a2 ack in cnt: %d\n"
			"a2 pwr cntl in: %d\n",
			bam_dmux_read_cnt,
			bam_dmux_write_cnt,
			bam_dmux_write_cpy_cnt,
			bam_dmux_write_cpy_bytes,
			bam_dmux_tx_sps_failure_cnt,
			bam_dmux_tx_stall_cnt,
			bam_rx_pool_len,
			atomic_read(&bam_dmux_ack_out_cnt),
			atomic_read(&bam_dmux_ack_in_cnt),
			atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
			);

	return i;
}

Eric Holmberg878923a2012-01-10 14:28:19 -07001366static int debug_log(char *buff, int max, loff_t *ppos)
1367{
1368 unsigned long flags;
1369 int i = 0;
1370
1371 if (bam_dmux_state_logging_disabled) {
1372 i += scnprintf(buff - i, max - i, "Logging disabled\n");
1373 return i;
1374 }
1375
1376 if (*ppos == 0) {
 1377 i += scnprintf(buff + i, max - i,
1378 "<DMUX> timestamp FLAGS [Message]\n"
1379 "FLAGS:\n"
Eric Holmberg006057d2012-01-11 10:10:42 -07001380 "\tD: 1 = Power collapse disabled\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001381 "\tR: 1 = in global reset\n"
1382 "\tP: 1 = BAM is powered up\n"
1383 "\tA: 1 = BAM initialized and ready for data\n"
1384 "\n"
1385 "\tV: 1 = Uplink vote for power\n"
1386 "\tU: 1 = Uplink active\n"
1387 "\tW: 1 = Uplink Wait-for-ack\n"
1388 "\tA: 1 = Uplink ACK received\n"
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001389 "\t#: >=1 On-demand uplink vote\n"
Jeff Hugo0b13a352012-03-17 23:18:30 -06001390 "\tD: 1 = Disconnect ACK active\n"
Eric Holmberg878923a2012-01-10 14:28:19 -07001391 );
1392 buff += i;
1393 }
1394
1395 spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
1396 while (kfifo_len(&bam_dmux_state_log)
1397 && (i + LOG_MESSAGE_MAX_SIZE) < max) {
1398 int k_len;
1399 k_len = kfifo_out(&bam_dmux_state_log,
1400 buff, LOG_MESSAGE_MAX_SIZE);
1401 if (k_len != LOG_MESSAGE_MAX_SIZE) {
1402 pr_err("%s: retrieve failure %d\n", __func__, k_len);
1403 break;
1404 }
1405
1406 /* keep non-null portion of string and add line break */
1407 k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
1408 buff += k_len;
1409 i += k_len;
1410 if (k_len && *(buff - 1) != '\n') {
1411 *buff++ = '\n';
1412 ++i;
1413 }
1414 }
1415 spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
1416
1417 return i;
1418}
1419
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001420#define DEBUG_BUFMAX 4096
1421static char debug_buffer[DEBUG_BUFMAX];
1422
1423static ssize_t debug_read(struct file *file, char __user *buf,
1424 size_t count, loff_t *ppos)
1425{
1426 int (*fill)(char *buf, int max) = file->private_data;
1427 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
1428 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
1429}
1430
Eric Holmberg878923a2012-01-10 14:28:19 -07001431static ssize_t debug_read_multiple(struct file *file, char __user *buff,
1432 size_t count, loff_t *ppos)
1433{
1434 int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
1435 char *buffer;
1436 int bsize;
1437
1438 buffer = kmalloc(count, GFP_KERNEL);
1439 if (!buffer)
1440 return -ENOMEM;
1441
1442 bsize = util_func(buffer, count, ppos);
1443
1444 if (bsize >= 0) {
1445 if (copy_to_user(buff, buffer, bsize)) {
1446 kfree(buffer);
1447 return -EFAULT;
1448 }
1449 *ppos += bsize;
1450 }
1451 kfree(buffer);
1452 return bsize;
1453}
1454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001455static int debug_open(struct inode *inode, struct file *file)
1456{
1457 file->private_data = inode->i_private;
1458 return 0;
1459}
1460
1461
1462static const struct file_operations debug_ops = {
1463 .read = debug_read,
1464 .open = debug_open,
1465};
1466
Eric Holmberg878923a2012-01-10 14:28:19 -07001467static const struct file_operations debug_ops_multiple = {
1468 .read = debug_read_multiple,
1469 .open = debug_open,
1470};
1471
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001472static void debug_create(const char *name, mode_t mode,
1473 struct dentry *dent,
1474 int (*fill)(char *buf, int max))
1475{
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001476 struct dentry *file;
1477
1478 file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
1479 if (IS_ERR(file))
1480 pr_err("%s: debugfs create failed %d\n", __func__,
1481 (int)PTR_ERR(file));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001482}
1483
Eric Holmberge4ac80b2012-01-12 09:21:59 -07001484static void debug_create_multiple(const char *name, mode_t mode,
1485 struct dentry *dent,
1486 int (*fill)(char *buf, int max, loff_t *ppos))
1487{
1488 struct dentry *file;
1489
1490 file = debugfs_create_file(name, mode, dent, fill, &debug_ops_multiple);
1491 if (IS_ERR(file))
1492 pr_err("%s: debugfs create failed %d\n", __func__,
1493 (int)PTR_ERR(file));
1494}
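/*
 * Illustrative sketch only (not part of the driver): a new read-only
 * debugfs entry follows the same fill-function pattern as debug_tbl()
 * and debug_stats() above -- write at most "max" bytes into "buf",
 * return the byte count, and register the function with debug_create().
 * The entry name "rx_pool_len" is hypothetical.
 *
 *	static int debug_rx_pool_len(char *buf, int max)
 *	{
 *		return scnprintf(buf, max, "rx pool len: %d\n",
 *				 bam_rx_pool_len);
 *	}
 *
 * registered from bam_dmux_init() alongside the existing entries:
 *
 *	debug_create("rx_pool_len", 0444, dent, debug_rx_pool_len);
 */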
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001495#endif
1496
Jeff Hugod98b1082011-10-24 10:30:23 -06001497static void notify_all(int event, unsigned long data)
1498{
1499 int i;
Jeff Hugocb798022012-04-09 14:55:40 -06001500 struct list_head *temp;
1501 struct outside_notify_func *func;
Jeff Hugod98b1082011-10-24 10:30:23 -06001502
1503 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001504 if (bam_ch_is_open(i)) {
Jeff Hugod98b1082011-10-24 10:30:23 -06001505 bam_ch[i].notify(bam_ch[i].priv, event, data);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001506 bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
1507 __func__, i, event, data);
1508 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001509 }
Jeff Hugocb798022012-04-09 14:55:40 -06001510
1511 __list_for_each(temp, &bam_other_notify_funcs) {
1512 func = container_of(temp, struct outside_notify_func,
1513 list_node);
1514 func->notify(func->priv, event, data);
1515 }
Jeff Hugod98b1082011-10-24 10:30:23 -06001516}
1517
1518static void kickoff_ul_wakeup_func(struct work_struct *work)
1519{
1520 read_lock(&ul_wakeup_lock);
1521 if (!bam_is_connected) {
1522 read_unlock(&ul_wakeup_lock);
1523 ul_wakeup();
Jeff Hugo4838f412012-01-20 11:19:37 -07001524 if (unlikely(in_global_reset == 1))
1525 return;
Jeff Hugod98b1082011-10-24 10:30:23 -06001526 read_lock(&ul_wakeup_lock);
1527 ul_packet_written = 1;
1528 notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
1529 }
1530 read_unlock(&ul_wakeup_lock);
1531}
1532
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001533int msm_bam_dmux_kickoff_ul_wakeup(void)
Jeff Hugod98b1082011-10-24 10:30:23 -06001534{
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001535 int is_connected;
1536
1537 read_lock(&ul_wakeup_lock);
1538 ul_packet_written = 1;
1539 is_connected = bam_is_connected;
1540 if (!is_connected)
1541 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1542 read_unlock(&ul_wakeup_lock);
1543
1544 return is_connected;
Jeff Hugod98b1082011-10-24 10:30:23 -06001545}
1546
Eric Holmberg878923a2012-01-10 14:28:19 -07001547static void power_vote(int vote)
1548{
1549 bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
1550 bam_dmux_uplink_vote, vote);
1551
1552 if (bam_dmux_uplink_vote == vote)
1553 bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
1554
1555 bam_dmux_uplink_vote = vote;
1556 if (vote)
1557 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
1558 else
1559 smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
1560}
1561
Eric Holmberg454d9da2012-01-12 09:37:14 -07001562/*
1563 * @note: Must be called with ul_wakeup_lock locked.
1564 */
1565static inline void ul_powerdown(void)
1566{
1567 bam_dmux_log("%s: powerdown\n", __func__);
1568 verify_tx_queue_is_empty(__func__);
1569
1570 if (a2_pc_disabled) {
1571 wait_for_dfab = 1;
1572 INIT_COMPLETION(dfab_unvote_completion);
1573 release_wakelock();
1574 } else {
1575 wait_for_ack = 1;
1576 INIT_COMPLETION(ul_wakeup_ack_completion);
1577 power_vote(0);
1578 }
1579 bam_is_connected = 0;
1580 notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
1581}
1582
1583static inline void ul_powerdown_finish(void)
1584{
1585 if (a2_pc_disabled && wait_for_dfab) {
1586 unvote_dfab();
1587 complete_all(&dfab_unvote_completion);
1588 wait_for_dfab = 0;
1589 }
1590}
1591
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001592/*
1593 * Votes for UL power and returns current power state.
1594 *
1595 * @returns true if currently connected
1596 */
1597int msm_bam_dmux_ul_power_vote(void)
1598{
1599 int is_connected;
1600
1601 read_lock(&ul_wakeup_lock);
1602 atomic_inc(&ul_ondemand_vote);
1603 is_connected = bam_is_connected;
1604 if (!is_connected)
1605 queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
1606 read_unlock(&ul_wakeup_lock);
1607
1608 return is_connected;
1609}
1610
1611/*
1612 * Unvotes for UL power.
1613 *
1614 * @returns true if vote count is 0 (UL shutdown possible)
1615 */
1616int msm_bam_dmux_ul_power_unvote(void)
1617{
1618 int vote;
1619
1620 read_lock(&ul_wakeup_lock);
1621 vote = atomic_dec_return(&ul_ondemand_vote);
1622 if (unlikely(vote) < 0)
1623 DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
1624 read_unlock(&ul_wakeup_lock);
1625
1626 return vote == 0;
1627}
1628
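/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * driver): on-demand clients bracket a burst of uplink traffic with the
 * vote/unvote pair above. If the vote returns false, the client defers
 * transmission until its notify callback delivers BAM_DMUX_UL_CONNECTED.
 * The helper names below are assumptions for the example.
 *
 *	if (!msm_bam_dmux_ul_power_vote())
 *		wait_for_ul_connected(dev);	// hypothetical helper
 *	send_queued_packets(dev);		// hypothetical helper
 *	if (msm_bam_dmux_ul_power_unvote())
 *		pr_debug("uplink may now power down\n");
 */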
Jeff Hugocb798022012-04-09 14:55:40 -06001629int msm_bam_dmux_reg_notify(void *priv,
1630 void (*notify)(void *priv, int event_type,
1631 unsigned long data))
1632{
1633 struct outside_notify_func *func;
1634
1635 if (!notify)
1636 return -EINVAL;
1637
1638 func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
1639 if (!func)
1640 return -ENOMEM;
1641
1642 func->notify = notify;
1643 func->priv = priv;
1644 list_add(&func->list_node, &bam_other_notify_funcs);
1645
1646 return 0;
1647}
1648
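/*
 * Illustrative registration sketch (hypothetical client code): a driver
 * that needs uplink power events without owning a logical channel
 * registers a callback once at init time:
 *
 *	static void my_bam_notify(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			resume_tx(priv);	// hypothetical helper
 *	}
 *	...
 *	rc = msm_bam_dmux_reg_notify(my_dev, my_bam_notify);
 *
 * No unregister call exists in this driver, so the callback and its priv
 * pointer must stay valid for the life of the system.
 */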
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001649static void ul_timeout(struct work_struct *work)
1650{
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001651 unsigned long flags;
1652 int ret;
1653
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001654 if (in_global_reset)
1655 return;
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001656 ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
1657 if (!ret) { /* failed to grab lock, reschedule and bail */
1658 schedule_delayed_work(&ul_timeout_work,
1659 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1660 return;
1661 }
Eric Holmberg454d9da2012-01-12 09:37:14 -07001662 if (bam_is_connected) {
Eric Holmberg6074aba2012-01-18 17:59:44 -07001663 if (!ul_packet_written) {
1664 spin_lock(&bam_tx_pool_spinlock);
1665 if (!list_empty(&bam_tx_pool)) {
1666 struct tx_pkt_info *info;
1667
1668 info = list_first_entry(&bam_tx_pool,
1669 struct tx_pkt_info, list_node);
1670 DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
1671 __func__, info->ts_sec, info->ts_nsec);
1672 DBG_INC_TX_STALL_CNT();
1673 ul_packet_written = 1;
1674 }
1675 spin_unlock(&bam_tx_pool_spinlock);
1676 }
1677
Eric Holmbergbc9f21c2012-01-18 11:33:33 -07001678 if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
1679 bam_dmux_log("%s: pkt written %d\n",
1680 __func__, ul_packet_written);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001681 ul_packet_written = 0;
1682 schedule_delayed_work(&ul_timeout_work,
1683 msecs_to_jiffies(UL_TIMEOUT_DELAY));
Eric Holmberg006057d2012-01-11 10:10:42 -07001684 } else {
Eric Holmberg454d9da2012-01-12 09:37:14 -07001685 ul_powerdown();
Eric Holmberg006057d2012-01-11 10:10:42 -07001686 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001687 }
Jeff Hugoc040a5b2011-11-15 14:26:01 -07001688 write_unlock_irqrestore(&ul_wakeup_lock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001689 ul_powerdown_finish();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001690}
Jeff Hugo4838f412012-01-20 11:19:37 -07001691
1692static int ssrestart_check(void)
1693{
Eric Holmberg90285e22012-02-22 12:33:05 -07001694 DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
1695 in_global_reset = 1;
1696 if (get_restart_level() <= RESET_SOC)
1697 DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
1698 return 1;
Jeff Hugo4838f412012-01-20 11:19:37 -07001699}
1700
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001701static void ul_wakeup(void)
1702{
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001703 int ret;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001704 int do_vote_dfab = 0;
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001705
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001706 mutex_lock(&wakeup_lock);
1707 if (bam_is_connected) { /* bam got connected before lock grabbed */
Eric Holmberg878923a2012-01-10 14:28:19 -07001708 bam_dmux_log("%s Already awake\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001709 mutex_unlock(&wakeup_lock);
1710 return;
1711 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001712
Jeff Hugoc2696142012-05-03 11:42:13 -06001713 /*
1714 * if someone is voting for UL before bam is inited (modem up first
1715 * time), set flag for init to kickoff ul wakeup once bam is inited
1716 */
1717 mutex_lock(&delayed_ul_vote_lock);
1718 if (unlikely(!bam_mux_initialized)) {
1719 need_delayed_ul_vote = 1;
1720 mutex_unlock(&delayed_ul_vote_lock);
1721 mutex_unlock(&wakeup_lock);
1722 return;
1723 }
1724 mutex_unlock(&delayed_ul_vote_lock);
1725
Eric Holmberg006057d2012-01-11 10:10:42 -07001726 if (a2_pc_disabled) {
1727 /*
1728 * don't grab the wakelock the first time because it is
1729 * already grabbed when a2 powers on
1730 */
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001731 if (likely(a2_pc_disabled_wakelock_skipped)) {
Eric Holmberg006057d2012-01-11 10:10:42 -07001732 grab_wakelock();
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001733 do_vote_dfab = 1; /* vote must occur after wait */
1734 } else {
Jeff Hugo583a6da2012-02-03 11:37:30 -07001735 a2_pc_disabled_wakelock_skipped = 1;
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001736 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001737 if (wait_for_dfab) {
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001738 ret = wait_for_completion_timeout(
Eric Holmberg006057d2012-01-11 10:10:42 -07001739 &dfab_unvote_completion, HZ);
1740 BUG_ON(ret == 0);
1741 }
Jeff Hugo5f57ec92012-05-14 13:34:28 -06001742 if (likely(do_vote_dfab))
1743 vote_dfab();
Eric Holmberg006057d2012-01-11 10:10:42 -07001744 schedule_delayed_work(&ul_timeout_work,
1745 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1746 bam_is_connected = 1;
1747 mutex_unlock(&wakeup_lock);
1748 return;
1749 }
1750
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001751 /*
1752 * must wait for the previous power down request to have been acked
1753 * chances are it already came in and this will just fall through
1754 * instead of waiting
1755 */
1756 if (wait_for_ack) {
Eric Holmberg878923a2012-01-10 14:28:19 -07001757 bam_dmux_log("%s waiting for previous ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001758 ret = wait_for_completion_timeout(
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001759 &ul_wakeup_ack_completion, HZ);
Eric Holmberg006057d2012-01-11 10:10:42 -07001760 wait_for_ack = 0;
Jeff Hugo4838f412012-01-20 11:19:37 -07001761 if (unlikely(ret == 0) && ssrestart_check()) {
1762 mutex_unlock(&wakeup_lock);
1763 bam_dmux_log("%s timeout previous ack\n", __func__);
1764 return;
1765 }
Jeff Hugof6c1c1e2011-12-01 17:43:49 -07001766 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001767 INIT_COMPLETION(ul_wakeup_ack_completion);
Eric Holmberg878923a2012-01-10 14:28:19 -07001768 power_vote(1);
1769 bam_dmux_log("%s waiting for wakeup ack\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001770 ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001771 if (unlikely(ret == 0) && ssrestart_check()) {
1772 mutex_unlock(&wakeup_lock);
1773 bam_dmux_log("%s timeout wakeup ack\n", __func__);
1774 return;
1775 }
Eric Holmberg878923a2012-01-10 14:28:19 -07001776 bam_dmux_log("%s waiting completion\n", __func__);
Jeff Hugo66f7f1e2012-01-16 14:30:42 -07001777 ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
Jeff Hugo4838f412012-01-20 11:19:37 -07001778 if (unlikely(ret == 0) && ssrestart_check()) {
1779 mutex_unlock(&wakeup_lock);
1780 bam_dmux_log("%s timeout power on\n", __func__);
1781 return;
1782 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001783
1784 bam_is_connected = 1;
Eric Holmberg878923a2012-01-10 14:28:19 -07001785 bam_dmux_log("%s complete\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001786 schedule_delayed_work(&ul_timeout_work,
1787 msecs_to_jiffies(UL_TIMEOUT_DELAY));
1788 mutex_unlock(&wakeup_lock);
1789}
1790
1791static void reconnect_to_bam(void)
1792{
1793 int i;
1794
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001795 in_global_reset = 0;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001796 vote_dfab();
Jeff Hugo18792a32012-06-20 15:25:55 -06001797 if (!power_management_only_mode) {
1798 i = sps_device_reset(a2_device_handle);
1799 if (i)
1800 pr_err("%s: device reset failed rc = %d\n", __func__,
1801 i);
1802 i = sps_connect(bam_tx_pipe, &tx_connection);
1803 if (i)
1804 pr_err("%s: tx connection failed rc = %d\n", __func__,
1805 i);
1806 i = sps_connect(bam_rx_pipe, &rx_connection);
1807 if (i)
1808 pr_err("%s: rx connection failed rc = %d\n", __func__,
1809 i);
1810 i = sps_register_event(bam_tx_pipe, &tx_register_event);
1811 if (i)
1812 pr_err("%s: tx event reg failed rc = %d\n", __func__,
1813 i);
1814 i = sps_register_event(bam_rx_pipe, &rx_register_event);
1815 if (i)
1816 pr_err("%s: rx event reg failed rc = %d\n", __func__,
1817 i);
1818 }
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001819
1820 bam_connection_is_active = 1;
1821
1822 if (polling_mode)
1823 rx_switch_to_interrupt_mode();
1824
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001825 toggle_apps_ack();
1826 complete_all(&bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001827 if (!power_management_only_mode)
1828 queue_rx();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001829}
1830
1831static void disconnect_to_bam(void)
1832{
1833 struct list_head *node;
1834 struct rx_pkt_info *info;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001835 unsigned long flags;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001836
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001837 bam_connection_is_active = 0;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001838
1839 /* handle disconnect during active UL */
1840 write_lock_irqsave(&ul_wakeup_lock, flags);
1841 if (bam_is_connected) {
1842 bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
1843 ul_powerdown();
1844 }
1845 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1846 ul_powerdown_finish();
1847
1848 /* tear down BAM connection */
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001849 INIT_COMPLETION(bam_connection_completion);
Jeff Hugo18792a32012-06-20 15:25:55 -06001850 if (!power_management_only_mode) {
1851 sps_disconnect(bam_tx_pipe);
1852 sps_disconnect(bam_rx_pipe);
1853 __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
1854 __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
1855 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001856 unvote_dfab();
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001857
1858 mutex_lock(&bam_rx_pool_mutexlock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001859 while (!list_empty(&bam_rx_pool)) {
1860 node = bam_rx_pool.next;
1861 list_del(node);
1862 info = container_of(node, struct rx_pkt_info, list_node);
1863 dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
1864 DMA_FROM_DEVICE);
1865 dev_kfree_skb_any(info->skb);
1866 kfree(info);
1867 }
Eric Holmbergb5b08e52012-01-20 14:19:00 -07001868 bam_rx_pool_len = 0;
Eric Holmberg8df0cdb2012-01-04 17:40:46 -07001869 mutex_unlock(&bam_rx_pool_mutexlock);
Eric Holmberg878923a2012-01-10 14:28:19 -07001870
Jeff Hugo0b13a352012-03-17 23:18:30 -06001871 if (disconnect_ack)
1872 toggle_apps_ack();
1873
Eric Holmberg878923a2012-01-10 14:28:19 -07001874 verify_tx_queue_is_empty(__func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001875}
1876
1877static void vote_dfab(void)
1878{
Jeff Hugoca0caa82011-12-05 16:05:23 -07001879 int rc;
1880
Eric Holmberg006057d2012-01-11 10:10:42 -07001881 bam_dmux_log("%s\n", __func__);
1882 mutex_lock(&dfab_status_lock);
1883 if (dfab_is_on) {
1884 bam_dmux_log("%s: dfab is already on\n", __func__);
1885 mutex_unlock(&dfab_status_lock);
1886 return;
1887 }
Jeff Hugo0c9371a2012-08-09 15:32:49 -06001888 if (dfab_clk) {
1889 rc = clk_prepare_enable(dfab_clk);
1890 if (rc)
1891 DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n",
1892 rc);
1893 }
1894 if (xo_clk) {
1895 rc = clk_prepare_enable(xo_clk);
1896 if (rc)
1897 DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n",
1898 rc);
1899 }
Eric Holmberg006057d2012-01-11 10:10:42 -07001900 dfab_is_on = 1;
1901 mutex_unlock(&dfab_status_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001902}
1903
1904static void unvote_dfab(void)
1905{
Eric Holmberg006057d2012-01-11 10:10:42 -07001906 bam_dmux_log("%s\n", __func__);
1907 mutex_lock(&dfab_status_lock);
1908 if (!dfab_is_on) {
1909 DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
1910 dump_stack();
1911 mutex_unlock(&dfab_status_lock);
1912 return;
1913 }
Jeff Hugo0c9371a2012-08-09 15:32:49 -06001914 if (dfab_clk)
1915 clk_disable_unprepare(dfab_clk);
1916 if (xo_clk)
1917 clk_disable_unprepare(xo_clk);
Eric Holmberg006057d2012-01-11 10:10:42 -07001918 dfab_is_on = 0;
1919 mutex_unlock(&dfab_status_lock);
1920}
1921
1922/* reference counting wrapper around wakelock */
1923static void grab_wakelock(void)
1924{
1925 unsigned long flags;
1926
1927 spin_lock_irqsave(&wakelock_reference_lock, flags);
1928 bam_dmux_log("%s: ref count = %d\n", __func__,
1929 wakelock_reference_count);
1930 if (wakelock_reference_count == 0)
1931 wake_lock(&bam_wakelock);
1932 ++wakelock_reference_count;
1933 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1934}
1935
1936static void release_wakelock(void)
1937{
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&wakelock_reference_lock, flags);
1941 if (wakelock_reference_count == 0) {
1942 DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
1943 dump_stack();
1944 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
1945 return;
1946 }
1947 bam_dmux_log("%s: ref count = %d\n", __func__,
1948 wakelock_reference_count);
1949 --wakelock_reference_count;
1950 if (wakelock_reference_count == 0)
1951 wake_unlock(&bam_wakelock);
1952 spin_unlock_irqrestore(&wakelock_reference_lock, flags);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06001953}
1954
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001955static int restart_notifier_cb(struct notifier_block *this,
1956 unsigned long code,
1957 void *data)
1958{
1959 int i;
1960 struct list_head *node;
1961 struct tx_pkt_info *info;
1962 int temp_remote_status;
Jeff Hugo626303bf2011-11-21 11:43:28 -07001963 unsigned long flags;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001964
1965 if (code != SUBSYS_AFTER_SHUTDOWN)
1966 return NOTIFY_DONE;
1967
Eric Holmberg878923a2012-01-10 14:28:19 -07001968 bam_dmux_log("%s: begin\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001969 in_global_reset = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001970
1971 /* Handle uplink Powerdown */
1972 write_lock_irqsave(&ul_wakeup_lock, flags);
1973 if (bam_is_connected) {
1974 ul_powerdown();
1975 wait_for_ack = 0;
1976 }
Jeff Hugo4838f412012-01-20 11:19:37 -07001977 /*
 1978 * if the modem crashed during ul_wakeup(), power_vote is 1 and needs
 1979 * to be reset to 0; harmless if the bam_is_connected check above passed
1980 */
1981 power_vote(0);
Eric Holmberg454d9da2012-01-12 09:37:14 -07001982 write_unlock_irqrestore(&ul_wakeup_lock, flags);
1983 ul_powerdown_finish();
Eric Holmberg006057d2012-01-11 10:10:42 -07001984 a2_pc_disabled = 0;
Jeff Hugo583a6da2012-02-03 11:37:30 -07001985 a2_pc_disabled_wakelock_skipped = 0;
Jeff Hugof62029d2012-07-17 13:39:53 -06001986 disconnect_ack = 1;
Eric Holmberg454d9da2012-01-12 09:37:14 -07001987
1988 /* Cleanup Channel States */
Eric Holmberga623da82012-07-12 09:37:09 -06001989 mutex_lock(&bam_pdev_mutexlock);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001990 for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
1991 temp_remote_status = bam_ch_is_remote_open(i);
1992 bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -07001993 bam_ch[i].num_tx_pkts = 0;
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06001994 if (bam_ch_is_local_open(i))
1995 bam_ch[i].status |= BAM_CH_IN_RESET;
1996 if (temp_remote_status) {
1997 platform_device_unregister(bam_ch[i].pdev);
1998 bam_ch[i].pdev = platform_device_alloc(
1999 bam_ch[i].name, 2);
2000 }
2001 }
Eric Holmberga623da82012-07-12 09:37:09 -06002002 mutex_unlock(&bam_pdev_mutexlock);
Eric Holmberg454d9da2012-01-12 09:37:14 -07002003
2004 /* Cleanup pending UL data */
Jeff Hugo626303bf2011-11-21 11:43:28 -07002005 spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002006 while (!list_empty(&bam_tx_pool)) {
2007 node = bam_tx_pool.next;
2008 list_del(node);
2009 info = container_of(node, struct tx_pkt_info,
2010 list_node);
2011 if (!info->is_cmd) {
2012 dma_unmap_single(NULL, info->dma_address,
2013 info->skb->len,
2014 DMA_TO_DEVICE);
2015 dev_kfree_skb_any(info->skb);
2016 } else {
2017 dma_unmap_single(NULL, info->dma_address,
2018 info->len,
2019 DMA_TO_DEVICE);
2020 kfree(info->skb);
2021 }
2022 kfree(info);
2023 }
Jeff Hugo626303bf2011-11-21 11:43:28 -07002024 spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
Eric Holmberg454d9da2012-01-12 09:37:14 -07002025
Eric Holmberg878923a2012-01-10 14:28:19 -07002026 bam_dmux_log("%s: complete\n", __func__);
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002027 return NOTIFY_DONE;
2028}
2029
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002030static int bam_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002031{
2032 u32 h;
2033 dma_addr_t dma_addr;
2034 int ret;
2035 void *a2_virt_addr;
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002036 int skip_iounmap = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002038 vote_dfab();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002039 /* init BAM */
Jeff Hugo7bf02052012-08-21 14:08:20 -06002040 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2041 a2_phys_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042 if (!a2_virt_addr) {
2043 pr_err("%s: ioremap failed\n", __func__);
2044 ret = -ENOMEM;
Jeff Hugo994a92d2012-01-05 13:25:21 -07002045 goto ioremap_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046 }
Jeff Hugo7bf02052012-08-21 14:08:20 -06002047 a2_props.phys_addr = (u32)(a2_phys_base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002048 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo7bf02052012-08-21 14:08:20 -06002049 a2_props.virt_size = a2_phys_size;
2050 a2_props.irq = a2_bam_irq;
Jeff Hugo927cba62011-11-11 11:49:52 -07002051 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 a2_props.num_pipes = A2_NUM_PIPES;
2053 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
Jeff Hugo75913c82011-12-05 15:59:01 -07002054 if (cpu_is_msm9615())
2055 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002056 /* need to free on tear down */
2057 ret = sps_register_bam_device(&a2_props, &h);
2058 if (ret < 0) {
2059 pr_err("%s: register bam error %d\n", __func__, ret);
2060 goto register_bam_failed;
2061 }
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002062 a2_device_handle = h;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002063
2064 bam_tx_pipe = sps_alloc_endpoint();
2065 if (bam_tx_pipe == NULL) {
2066 pr_err("%s: tx alloc endpoint failed\n", __func__);
2067 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002068 goto tx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002069 }
2070 ret = sps_get_config(bam_tx_pipe, &tx_connection);
2071 if (ret) {
2072 pr_err("%s: tx get config failed %d\n", __func__, ret);
2073 goto tx_get_config_failed;
2074 }
2075
2076 tx_connection.source = SPS_DEV_HANDLE_MEM;
2077 tx_connection.src_pipe_index = 0;
2078 tx_connection.destination = h;
2079 tx_connection.dest_pipe_index = 4;
2080 tx_connection.mode = SPS_MODE_DEST;
2081 tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
2082 tx_desc_mem_buf.size = 0x800; /* 2k */
2083 tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
2084 &dma_addr, 0);
2085 if (tx_desc_mem_buf.base == NULL) {
2086 pr_err("%s: tx memory alloc failed\n", __func__);
2087 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002088 goto tx_get_config_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089 }
2090 tx_desc_mem_buf.phys_base = dma_addr;
2091 memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
2092 tx_connection.desc = tx_desc_mem_buf;
2093 tx_connection.event_thresh = 0x10;
2094
2095 ret = sps_connect(bam_tx_pipe, &tx_connection);
2096 if (ret < 0) {
2097 pr_err("%s: tx connect error %d\n", __func__, ret);
2098 goto tx_connect_failed;
2099 }
2100
2101 bam_rx_pipe = sps_alloc_endpoint();
2102 if (bam_rx_pipe == NULL) {
2103 pr_err("%s: rx alloc endpoint failed\n", __func__);
2104 ret = -ENOMEM;
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002105 goto rx_alloc_endpoint_failed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002106 }
2107 ret = sps_get_config(bam_rx_pipe, &rx_connection);
2108 if (ret) {
2109 pr_err("%s: rx get config failed %d\n", __func__, ret);
2110 goto rx_get_config_failed;
2111 }
2112
2113 rx_connection.source = h;
2114 rx_connection.src_pipe_index = 5;
2115 rx_connection.destination = SPS_DEV_HANDLE_MEM;
2116 rx_connection.dest_pipe_index = 1;
2117 rx_connection.mode = SPS_MODE_SRC;
Jeff Hugo949080a2011-08-30 11:58:56 -06002118 rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
2119 SPS_O_ACK_TRANSFERS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002120 rx_desc_mem_buf.size = 0x800; /* 2k */
2121 rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
2122 &dma_addr, 0);
2123 if (rx_desc_mem_buf.base == NULL) {
2124 pr_err("%s: rx memory alloc failed\n", __func__);
2125 ret = -ENOMEM;
2126 goto rx_mem_failed;
2127 }
2128 rx_desc_mem_buf.phys_base = dma_addr;
2129 memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
2130 rx_connection.desc = rx_desc_mem_buf;
2131 rx_connection.event_thresh = 0x10;
2132
2133 ret = sps_connect(bam_rx_pipe, &rx_connection);
2134 if (ret < 0) {
2135 pr_err("%s: rx connect error %d\n", __func__, ret);
2136 goto rx_connect_failed;
2137 }
2138
2139 tx_register_event.options = SPS_O_EOT;
2140 tx_register_event.mode = SPS_TRIGGER_CALLBACK;
2141 tx_register_event.xfer_done = NULL;
2142 tx_register_event.callback = bam_mux_tx_notify;
2143 tx_register_event.user = NULL;
2144 ret = sps_register_event(bam_tx_pipe, &tx_register_event);
2145 if (ret < 0) {
2146 pr_err("%s: tx register event error %d\n", __func__, ret);
2147 goto rx_event_reg_failed;
2148 }
2149
Jeff Hugo33dbc002011-08-25 15:52:53 -06002150 rx_register_event.options = SPS_O_EOT;
2151 rx_register_event.mode = SPS_TRIGGER_CALLBACK;
2152 rx_register_event.xfer_done = NULL;
2153 rx_register_event.callback = bam_mux_rx_notify;
2154 rx_register_event.user = NULL;
2155 ret = sps_register_event(bam_rx_pipe, &rx_register_event);
2156 if (ret < 0) {
2157 pr_err("%s: tx register event error %d\n", __func__, ret);
2158 goto rx_event_reg_failed;
2159 }
2160
Jeff Hugoc2696142012-05-03 11:42:13 -06002161 mutex_lock(&delayed_ul_vote_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002162 bam_mux_initialized = 1;
Jeff Hugoc2696142012-05-03 11:42:13 -06002163 if (need_delayed_ul_vote) {
2164 need_delayed_ul_vote = 0;
2165 msm_bam_dmux_kickoff_ul_wakeup();
2166 }
2167 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002168 toggle_apps_ack();
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002169 bam_connection_is_active = 1;
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002170 complete_all(&bam_connection_completion);
Jeff Hugo2fb555e2012-03-14 16:33:47 -06002171 queue_rx();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002172 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002173
2174rx_event_reg_failed:
2175 sps_disconnect(bam_rx_pipe);
2176rx_connect_failed:
2177 dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
2178 rx_desc_mem_buf.phys_base);
2179rx_mem_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002180rx_get_config_failed:
2181 sps_free_endpoint(bam_rx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002182rx_alloc_endpoint_failed:
2183 sps_disconnect(bam_tx_pipe);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002184tx_connect_failed:
2185 dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
2186 tx_desc_mem_buf.phys_base);
2187tx_get_config_failed:
2188 sps_free_endpoint(bam_tx_pipe);
Jeff Hugo8ff4a812012-01-17 11:03:13 -07002189tx_alloc_endpoint_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002190 sps_deregister_bam_device(h);
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002191 /*
2192 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
2193 * same handle below will cause a crash, so skip it if we've freed
2194 * the handle here.
2195 */
2196 skip_iounmap = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002197register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002198 if (!skip_iounmap)
2199 iounmap(a2_virt_addr);
Jeff Hugo994a92d2012-01-05 13:25:21 -07002200ioremap_failed:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201 /*destroy_workqueue(bam_mux_workqueue);*/
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002202 return ret;
2203}
2204
2205static int bam_init_fallback(void)
2206{
2207 u32 h;
2208 int ret;
2209 void *a2_virt_addr;
2210
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002211 /* init BAM */
Jeff Hugo7bf02052012-08-21 14:08:20 -06002212 a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
2213 a2_phys_size);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002214 if (!a2_virt_addr) {
2215 pr_err("%s: ioremap failed\n", __func__);
2216 ret = -ENOMEM;
2217 goto ioremap_failed;
2218 }
Jeff Hugo7bf02052012-08-21 14:08:20 -06002219 a2_props.phys_addr = (u32)(a2_phys_base);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002220 a2_props.virt_addr = a2_virt_addr;
Jeff Hugo7bf02052012-08-21 14:08:20 -06002221 a2_props.virt_size = a2_phys_size;
2222 a2_props.irq = a2_bam_irq;
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002223 a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
2224 a2_props.num_pipes = A2_NUM_PIPES;
2225 a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
2226 if (cpu_is_msm9615())
2227 a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2228 ret = sps_register_bam_device(&a2_props, &h);
2229 if (ret < 0) {
2230 pr_err("%s: register bam error %d\n", __func__, ret);
2231 goto register_bam_failed;
2232 }
2233 a2_device_handle = h;
Jeff Hugoc2696142012-05-03 11:42:13 -06002234
2235 mutex_lock(&delayed_ul_vote_lock);
2236 bam_mux_initialized = 1;
2237 if (need_delayed_ul_vote) {
2238 need_delayed_ul_vote = 0;
2239 msm_bam_dmux_kickoff_ul_wakeup();
2240 }
2241 mutex_unlock(&delayed_ul_vote_lock);
Jeff Hugo2bec9772012-04-05 12:25:16 -06002242 toggle_apps_ack();
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002243
Jeff Hugo18792a32012-06-20 15:25:55 -06002244 power_management_only_mode = 1;
2245 bam_connection_is_active = 1;
2246 complete_all(&bam_connection_completion);
2247
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002248 return 0;
2249
2250register_bam_failed:
Jeff Hugo4b2890d2012-01-16 16:14:21 -07002251 iounmap(a2_virt_addr);
Jeff Hugo9dea05c2011-12-21 12:23:05 -07002252ioremap_failed:
2253 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002254}
Jeff Hugoade1f842011-08-03 15:53:59 -06002255
Jeff Hugoa670b762012-03-15 15:58:28 -06002256static void msm9615_bam_init(void)
Eric Holmberg604ab252012-01-15 00:01:18 -07002257{
2258 int ret = 0;
2259
2260 ret = bam_init();
2261 if (ret) {
2262 ret = bam_init_fallback();
2263 if (ret)
2264 pr_err("%s: bam init fallback failed: %d",
2265 __func__, ret);
2266 }
2267}
2268
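/*
 * The apps->modem ack is a toggle rather than a fixed value: each call
 * below flips SMSM_A2_POWER_CONTROL_ACK, so every power-control
 * transition from A2 produces a fresh edge for the modem to observe.
 */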
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002269static void toggle_apps_ack(void)
2270{
2271 static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
Eric Holmberg878923a2012-01-10 14:28:19 -07002272
2273 bam_dmux_log("%s: apps ack %d->%d\n", __func__,
2274 clear_bit & 0x1, ~clear_bit & 0x1);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002275 smsm_change_state(SMSM_APPS_STATE,
2276 clear_bit & SMSM_A2_POWER_CONTROL_ACK,
2277 ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
2278 clear_bit = ~clear_bit;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002279 DBG_INC_ACK_OUT_CNT();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002280}
2281
Jeff Hugoade1f842011-08-03 15:53:59 -06002282static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
2283{
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002284 static int last_processed_state;
2285
2286 mutex_lock(&smsm_cb_lock);
Eric Holmberg878923a2012-01-10 14:28:19 -07002287 bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002288 DBG_INC_A2_POWER_CONTROL_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002289 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2290 new_state);
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002291 if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
2292 bam_dmux_log("%s: already processed this state\n", __func__);
2293 mutex_unlock(&smsm_cb_lock);
2294 return;
2295 }
2296
2297 last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
Eric Holmberg878923a2012-01-10 14:28:19 -07002298
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002299 if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002300 bam_dmux_log("%s: reconnect\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002301 grab_wakelock();
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002302 reconnect_to_bam();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002303 } else if (bam_mux_initialized &&
2304 !(new_state & SMSM_A2_POWER_CONTROL)) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002305 bam_dmux_log("%s: disconnect\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002306 disconnect_to_bam();
Eric Holmberg006057d2012-01-11 10:10:42 -07002307 release_wakelock();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002308 } else if (new_state & SMSM_A2_POWER_CONTROL) {
Eric Holmberg878923a2012-01-10 14:28:19 -07002309 bam_dmux_log("%s: init\n", __func__);
Eric Holmberg006057d2012-01-11 10:10:42 -07002310 grab_wakelock();
Jeff Hugoa670b762012-03-15 15:58:28 -06002311 if (cpu_is_msm9615())
2312 msm9615_bam_init();
2313 else
Eric Holmberg604ab252012-01-15 00:01:18 -07002314 bam_init();
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002315 } else {
Eric Holmberg878923a2012-01-10 14:28:19 -07002316 bam_dmux_log("%s: bad state change\n", __func__);
Jeff Hugoade1f842011-08-03 15:53:59 -06002317 pr_err("%s: unsupported state change\n", __func__);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002318 }
Jeff Hugo4b7c7b32012-04-18 16:25:14 -06002319 mutex_unlock(&smsm_cb_lock);
Jeff Hugoade1f842011-08-03 15:53:59 -06002320
2321}
2322
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002323static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
2324 uint32_t new_state)
2325{
Eric Holmberg1f1255d2012-02-22 13:37:21 -07002326 DBG_INC_ACK_IN_CNT();
Eric Holmberg878923a2012-01-10 14:28:19 -07002327 bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
2328 new_state);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002329 complete_all(&ul_wakeup_ack_completion);
2330}
2331
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002332static int bam_dmux_probe(struct platform_device *pdev)
2333{
2334 int rc;
Jeff Hugo7bf02052012-08-21 14:08:20 -06002335 struct resource *r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002336
2337 DBG("%s probe called\n", __func__);
2338 if (bam_mux_initialized)
2339 return 0;
2340
Jeff Hugo7bf02052012-08-21 14:08:20 -06002341 if (pdev->dev.of_node) {
2342 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2343 if (!r) {
2344 pr_err("%s: reg field missing\n", __func__);
2345 return -ENODEV;
2346 }
2347 a2_phys_base = (void *)(r->start);
2348 a2_phys_size = (uint32_t)(resource_size(r));
2349 a2_bam_irq = platform_get_irq(pdev, 0);
2350 if (a2_bam_irq == -ENXIO) {
2351 pr_err("%s: irq field missing\n", __func__);
2352 return -ENODEV;
2353 }
2354 DBG("%s: base:%p size:%x irq:%d\n", __func__,
2355 a2_phys_base,
2356 a2_phys_size,
2357 a2_bam_irq);
2358 } else { /* fallback to default init data */
2359 a2_phys_base = (void *)(A2_PHYS_BASE);
2360 a2_phys_size = A2_PHYS_SIZE;
2361 a2_bam_irq = A2_BAM_IRQ;
2362 }
2363
Stephen Boyd69d35e32012-02-14 15:33:30 -08002364 xo_clk = clk_get(&pdev->dev, "xo");
2365 if (IS_ERR(xo_clk)) {
Jeff Hugo0c9371a2012-08-09 15:32:49 -06002366 bam_dmux_log("%s: did not get xo clock\n", __func__);
2367 xo_clk = NULL;
Stephen Boyd69d35e32012-02-14 15:33:30 -08002368 }
Stephen Boyd1c51a492011-10-26 12:11:47 -07002369 dfab_clk = clk_get(&pdev->dev, "bus_clk");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002370 if (IS_ERR(dfab_clk)) {
Jeff Hugo0c9371a2012-08-09 15:32:49 -06002371 bam_dmux_log("%s: did not get dfab clock\n", __func__);
2372 dfab_clk = NULL;
2373 } else {
2374 rc = clk_set_rate(dfab_clk, 64000000);
2375 if (rc)
2376 pr_err("%s: unable to set dfab clock rate\n", __func__);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002377 }
2378
Jeff Hugofff43af92012-03-29 17:54:52 -06002379 /*
2380 * setup the workqueue so that it can be pinned to core 0 and not
2381 * block the watchdog pet function, so that netif_rx() in rmnet
2382 * only uses one queue.
2383 */
2384 bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
2385 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002386 if (!bam_mux_rx_workqueue)
2387 return -ENOMEM;
2388
2389 bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
2390 if (!bam_mux_tx_workqueue) {
2391 destroy_workqueue(bam_mux_rx_workqueue);
2392 return -ENOMEM;
2393 }
2394
Jeff Hugo7960abd2011-08-02 15:39:38 -06002395 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002396 spin_lock_init(&bam_ch[rc].lock);
Jeff Hugo7960abd2011-08-02 15:39:38 -06002397 scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
2398 "bam_dmux_ch_%d", rc);
2399 /* bus 2, ie a2 stream 2 */
2400 bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
2401 if (!bam_ch[rc].pdev) {
2402 pr_err("%s: platform device alloc failed\n", __func__);
2403 destroy_workqueue(bam_mux_rx_workqueue);
2404 destroy_workqueue(bam_mux_tx_workqueue);
2405 return -ENOMEM;
2406 }
2407 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002408
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002409 init_completion(&ul_wakeup_ack_completion);
2410 init_completion(&bam_connection_completion);
Eric Holmberg006057d2012-01-11 10:10:42 -07002411 init_completion(&dfab_unvote_completion);
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002412 INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
Jeff Hugoae3a85e2011-12-02 17:10:18 -07002413 wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002414
Jeff Hugoade1f842011-08-03 15:53:59 -06002415 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
2416 bam_dmux_smsm_cb, NULL);
2417
2418 if (rc) {
2419 destroy_workqueue(bam_mux_rx_workqueue);
2420 destroy_workqueue(bam_mux_tx_workqueue);
2421 pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
2422 return -ENOMEM;
2423 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002424
Jeff Hugoaab7ebc2011-09-07 16:46:04 -06002425 rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
2426 bam_dmux_smsm_ack_cb, NULL);
2427
2428 if (rc) {
2429 destroy_workqueue(bam_mux_rx_workqueue);
2430 destroy_workqueue(bam_mux_tx_workqueue);
2431 smsm_state_cb_deregister(SMSM_MODEM_STATE,
2432 SMSM_A2_POWER_CONTROL,
2433 bam_dmux_smsm_cb, NULL);
2434 pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
2435 rc);
2436 for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
2437 platform_device_put(bam_ch[rc].pdev);
2438 return -ENOMEM;
2439 }
2440
Eric Holmbergfd1e2ae2011-11-15 18:28:17 -07002441 if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
2442 bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
2443
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002444 return 0;
2445}
2446
Jeff Hugo7bf02052012-08-21 14:08:20 -06002447static struct of_device_id msm_match_table[] = {
2448 {.compatible = "qcom,bam_dmux"},
2449 {},
2450};
2451
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002452static struct platform_driver bam_dmux_driver = {
2453 .probe = bam_dmux_probe,
2454 .driver = {
2455 .name = "BAM_RMNT",
2456 .owner = THIS_MODULE,
Jeff Hugo7bf02052012-08-21 14:08:20 -06002457 .of_match_table = msm_match_table,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002458 },
2459};
2460
2461static int __init bam_dmux_init(void)
2462{
Eric Holmberg878923a2012-01-10 14:28:19 -07002463 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002464#ifdef CONFIG_DEBUG_FS
2465 struct dentry *dent;
2466
2467 dent = debugfs_create_dir("bam_dmux", 0);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002468 if (!IS_ERR(dent)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002469 debug_create("tbl", 0444, dent, debug_tbl);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002470 debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
2471 debug_create("stats", 0444, dent, debug_stats);
Eric Holmberge4ac80b2012-01-12 09:21:59 -07002472 debug_create_multiple("log", 0444, dent, debug_log);
Eric Holmberg2fddbcd2011-11-28 18:25:57 -07002473 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002474#endif
Eric Holmberg878923a2012-01-10 14:28:19 -07002475 ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
2476 if (ret) {
2477 pr_err("%s: failed to allocate log %d\n", __func__, ret);
2478 bam_dmux_state_logging_disabled = 1;
2479 }
2480
Anurag Singhdcd8b4e2012-07-30 16:46:37 -07002481 rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;
2482
Jeff Hugo6e7a92a2011-10-24 05:25:13 -06002483 subsys_notif_register_notifier("modem", &restart_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002484 return platform_driver_register(&bam_dmux_driver);
2485}
2486
Jeff Hugoade1f842011-08-03 15:53:59 -06002487late_initcall(bam_dmux_init); /* needs to init after SMD */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002488MODULE_DESCRIPTION("MSM BAM DMUX");
2489MODULE_LICENSE("GPL v2");