/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * BAM DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/wakelock.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <mach/msm_ipc_logging.h>
#include <mach/sps.h>
#include <mach/bam_dmux.h>
#include <mach/msm_smsm.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/subsystem_restart.h>

#define BAM_CH_LOCAL_OPEN 0x1
#define BAM_CH_REMOTE_OPEN 0x2
#define BAM_CH_IN_RESET 0x4

#define BAM_MUX_HDR_MAGIC_NO 0x33fc

#define BAM_MUX_HDR_CMD_DATA 0
#define BAM_MUX_HDR_CMD_OPEN 1
#define BAM_MUX_HDR_CMD_CLOSE 2
#define BAM_MUX_HDR_CMD_STATUS 3 /* unused */
#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4


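/*
 * Per-channel TX queue depth bounds for watermark-based flow control.
 * They only take effect once a client opts in by calling one of the
 * msm_bam_dmux_is_ch_full()/_low() queries, which set use_wm.
 */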
#define LOW_WATERMARK 2
#define HIGH_WATERMARK 4
#define DEFAULT_POLLING_MIN_SLEEP (950)
#define MAX_POLLING_SLEEP (6050)
#define MIN_POLLING_SLEEP (950)

static int msm_bam_dmux_debug_enable;
module_param_named(debug_enable, msm_bam_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MIN_SLEEP = 950;
module_param_named(min_sleep, POLLING_MIN_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_MAX_SLEEP = 1050;
module_param_named(max_sleep, POLLING_MAX_SLEEP,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int POLLING_INACTIVITY = 40;
module_param_named(inactivity, POLLING_INACTIVITY,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static int bam_adaptive_timer_enabled = 1;
module_param_named(adaptive_timer_enabled,
		   bam_adaptive_timer_enabled,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t bam_dmux_read_cnt;
static uint32_t bam_dmux_write_cnt;
static uint32_t bam_dmux_write_cpy_cnt;
static uint32_t bam_dmux_write_cpy_bytes;
static uint32_t bam_dmux_tx_sps_failure_cnt;
static uint32_t bam_dmux_tx_stall_cnt;
static atomic_t bam_dmux_ack_out_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_ack_in_cnt = ATOMIC_INIT(0);
static atomic_t bam_dmux_a2_pwr_cntl_in_cnt = ATOMIC_INIT(0);

#define DBG(x...) do {				\
		if (msm_bam_dmux_debug_enable)	\
			pr_debug(x);		\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		bam_dmux_read_cnt += (x);			\
		if (msm_bam_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, bam_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				 \
		bam_dmux_write_cnt += (x);			 \
		if (msm_bam_dmux_debug_enable)			 \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, bam_dmux_write_cnt);	 \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					    \
		bam_dmux_write_cpy_bytes += (x);			    \
		bam_dmux_write_cpy_cnt++;				    \
		if (msm_bam_dmux_debug_enable)				    \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, bam_dmux_write_cpy_cnt,	    \
				 bam_dmux_write_cpy_bytes);		    \
	} while (0)

#define DBG_INC_TX_SPS_FAILURE_CNT() do {	\
	bam_dmux_tx_sps_failure_cnt++;		\
} while (0)

#define DBG_INC_TX_STALL_CNT() do {	\
	bam_dmux_tx_stall_cnt++;	\
} while (0)

#define DBG_INC_ACK_OUT_CNT() \
	atomic_inc(&bam_dmux_ack_out_cnt)

#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	atomic_inc(&bam_dmux_a2_pwr_cntl_in_cnt)

#define DBG_INC_ACK_IN_CNT() \
	atomic_inc(&bam_dmux_ack_in_cnt)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#define DBG_INC_TX_SPS_FAILURE_CNT() do { } while (0)
#define DBG_INC_TX_STALL_CNT() do { } while (0)
#define DBG_INC_ACK_OUT_CNT() do { } while (0)
#define DBG_INC_A2_POWER_CONTROL_IN_CNT() \
	do { } while (0)
#define DBG_INC_ACK_IN_CNT() do { } while (0)
#endif

struct bam_ch_info {
	uint32_t status;
	void (*notify)(void *, int, unsigned long);
	void *priv;
	spinlock_t lock;
	struct platform_device *pdev;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	int num_tx_pkts;
	int use_wm;
};

struct tx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	char is_cmd;
	uint32_t len;
	struct work_struct work;
	struct list_head list_node;
	unsigned ts_sec;
	unsigned long ts_nsec;
};

struct rx_pkt_info {
	struct sk_buff *skb;
	dma_addr_t dma_address;
	struct work_struct work;
	struct list_head list_node;
};

#define A2_NUM_PIPES 6
#define A2_SUMMING_THRESHOLD 4096
#define A2_PHYS_BASE 0x124C2000
#define A2_PHYS_SIZE 0x2000
#define BUFFER_SIZE 2048
#define NUM_BUFFERS 32

#ifndef A2_BAM_IRQ
#define A2_BAM_IRQ -1
#endif

static void *a2_phys_base;
static uint32_t a2_phys_size;
static int a2_bam_irq;
static struct sps_bam_props a2_props;
static u32 a2_device_handle;
static struct sps_pipe *bam_tx_pipe;
static struct sps_pipe *bam_rx_pipe;
static struct sps_connect tx_connection;
static struct sps_connect rx_connection;
static struct sps_mem_buffer tx_desc_mem_buf;
static struct sps_mem_buffer rx_desc_mem_buf;
static struct sps_register_event tx_register_event;
static struct sps_register_event rx_register_event;
static bool satellite_mode;

static struct bam_ch_info bam_ch[BAM_DMUX_NUM_CHANNELS];
static int bam_mux_initialized;

static int polling_mode;
static unsigned long rx_timer_interval;

static LIST_HEAD(bam_rx_pool);
static DEFINE_MUTEX(bam_rx_pool_mutexlock);
static int bam_rx_pool_len;
static LIST_HEAD(bam_tx_pool);
static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
static DEFINE_MUTEX(bam_pdev_mutexlock);

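/*
 * On-the-wire multiplexing header prepended to every BAM DMUX frame.
 * magic_num must equal BAM_MUX_HDR_MAGIC_NO, cmd is one of the
 * BAM_MUX_HDR_CMD_* values, and pad_len counts the bytes appended to
 * round the payload up to a 4-byte boundary.
 */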
struct bam_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

static void notify_all(int event, unsigned long data);
static void bam_mux_write_done(struct work_struct *work);
static void handle_bam_mux_cmd(struct work_struct *work);
static void rx_timer_work_func(struct work_struct *work);

static DECLARE_WORK(rx_timer_work, rx_timer_work_func);
static struct delayed_work queue_rx_work;

static struct workqueue_struct *bam_mux_rx_workqueue;
static struct workqueue_struct *bam_mux_tx_workqueue;

/* A2 power collapse */
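/*
 * While the link is idle the A2 may power-collapse the BAM.  The uplink
 * path therefore votes for power before transmitting (ul_wakeup) and,
 * judging by UL_TIMEOUT_DELAY and the ul_timeout() worker, drops the
 * vote again after a period of uplink inactivity.
 */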
#define UL_TIMEOUT_DELAY 1000	/* in ms */
#define ENABLE_DISCONNECT_ACK 0x1
static void toggle_apps_ack(void);
static void reconnect_to_bam(void);
static void disconnect_to_bam(void);
static void ul_wakeup(void);
static void ul_timeout(struct work_struct *work);
static void vote_dfab(void);
static void unvote_dfab(void);
static void kickoff_ul_wakeup_func(struct work_struct *work);
static void grab_wakelock(void);
static void release_wakelock(void);

static int bam_is_connected;
static DEFINE_MUTEX(wakeup_lock);
static struct completion ul_wakeup_ack_completion;
static struct completion bam_connection_completion;
static struct delayed_work ul_timeout_work;
static int ul_packet_written;
static atomic_t ul_ondemand_vote = ATOMIC_INIT(0);
static struct clk *dfab_clk, *xo_clk;
static DEFINE_RWLOCK(ul_wakeup_lock);
static DECLARE_WORK(kickoff_ul_wakeup, kickoff_ul_wakeup_func);
static int bam_connection_is_active;
static int wait_for_ack;
static struct wake_lock bam_wakelock;
static int a2_pc_disabled;
static DEFINE_MUTEX(dfab_status_lock);
static int dfab_is_on;
static int wait_for_dfab;
static struct completion dfab_unvote_completion;
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
static int power_management_only_mode;
static int in_ssr;
static int ssr_skipped_disconnect;

struct outside_notify_func {
	void (*notify)(void *, int, unsigned long);
	void *priv;
	struct list_head list_node;
};
/* End A2 power collapse */

/* subsystem restart */
static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code,
			       void *data);

static struct notifier_block restart_notifier = {
	.notifier_call = restart_notifier_cb,
};
static int in_global_reset;
/* end subsystem restart */

#define bam_ch_is_open(x) \
	(bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN))

#define bam_ch_is_local_open(x) \
	(bam_ch[(x)].status & BAM_CH_LOCAL_OPEN)

#define bam_ch_is_remote_open(x) \
	(bam_ch[(x)].status & BAM_CH_REMOTE_OPEN)

#define bam_ch_is_in_reset(x) \
	(bam_ch[(x)].status & BAM_CH_IN_RESET)

#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
static uint32_t bam_dmux_state_logging_disabled;
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;

static void bam_dmux_log(const char *fmt, ...)
					__printf(1, 2);


#define DMUX_LOG_KERR(fmt...) \
do { \
	bam_dmux_log(fmt); \
	pr_err(fmt); \
} while (0)

static void *bam_ipc_log_txt;

#define BAM_IPC_LOG_PAGES 5

/**
 * Log a state change along with a small message.
 *
 * Complete size of the message is limited to LOG_MESSAGE_MAX_SIZE.
 */
static void bam_dmux_log(const char *fmt, ...)
{
	char buff[LOG_MESSAGE_MAX_SIZE];
	va_list arg_list;
	unsigned long long t_now;
	unsigned long nanosec_rem;
	int len = 0;

	if (bam_dmux_state_logging_disabled)
		return;

	t_now = sched_clock();
	nanosec_rem = do_div(t_now, 1000000000U);

	/*
	 * States
	 * D: 1 = Power collapse disabled
	 * R: 1 = in global reset
	 * P: 1 = BAM is powered up
	 * A: 1 = BAM initialized and ready for data
	 *
	 * V: 1 = Uplink vote for power
	 * U: 1 = Uplink active
	 * W: 1 = Uplink Wait-for-ack
	 * A: 1 = Uplink ACK received
	 * #: >=1 On-demand uplink vote
	 * D: 1 = Disconnect ACK active
	 */
	len += scnprintf(buff, sizeof(buff),
			"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
			(unsigned)t_now, nanosec_rem,
			a2_pc_disabled ? 'D' : 'd',
			in_global_reset ? 'R' : 'r',
			bam_dmux_power_state ? 'P' : 'p',
			bam_connection_is_active ? 'A' : 'a',
			bam_dmux_uplink_vote ? 'V' : 'v',
			bam_is_connected ? 'U' : 'u',
			wait_for_ack ? 'W' : 'w',
			ul_wakeup_ack_completion.done ? 'A' : 'a',
			atomic_read(&ul_ondemand_vote),
			disconnect_ack ? 'D' : 'd'
			);

	va_start(arg_list, fmt);
	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
	va_end(arg_list);
	memset(buff + len, 0x0, sizeof(buff) - len);
	if (bam_ipc_log_txt)
		ipc_log_string(bam_ipc_log_txt, buff);
}

static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
	unsigned long long t_now;

	t_now = sched_clock();
	pkt->ts_nsec = do_div(t_now, 1000000000U);
	pkt->ts_sec = (unsigned)t_now;
}

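/*
 * Sanity check for points where the TX path is expected to be idle:
 * logs (and, outside of global reset, also prints) every packet still
 * sitting in bam_tx_pool along with its submission timestamp.
 */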
static inline void verify_tx_queue_is_empty(const char *func)
{
	unsigned long flags;
	struct tx_pkt_info *info;
	int reported = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_for_each_entry(info, &bam_tx_pool, list_node) {
		if (!reported) {
			bam_dmux_log("%s: tx pool not empty\n", func);
			if (!in_global_reset)
				pr_err("%s: tx pool not empty\n", func);
			reported = 1;
		}
		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
			&info->list_node, info->ts_sec, info->ts_nsec);
		if (!in_global_reset)
			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
				&info->list_node, info->ts_sec, info->ts_nsec);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
}

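/*
 * Replenish the RX pool up to NUM_BUFFERS descriptors, allocating a
 * BUFFER_SIZE skb per descriptor and queueing it on the BAM RX pipe.
 * Allocation failures are not fatal; if the pool ever drains to zero,
 * queue_rx_work retries the refill 100ms later.
 */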
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;
	int ret;
	int rx_len_cached;

	mutex_lock(&bam_rx_pool_mutexlock);
	rx_len_cached = bam_rx_pool_len;
	mutex_unlock(&bam_rx_pool_mutexlock);

	while (bam_connection_is_active && rx_len_cached < NUM_BUFFERS) {
		if (in_global_reset)
			goto fail;

		info = kmalloc(sizeof(struct rx_pkt_info),
						GFP_NOWAIT | __GFP_NOWARN);
		if (!info) {
			DMUX_LOG_KERR(
				"%s: unable to alloc rx_pkt_info, will retry later\n",
				__func__);
			goto fail;
		}

		INIT_WORK(&info->work, handle_bam_mux_cmd);

		info->skb = __dev_alloc_skb(BUFFER_SIZE,
						GFP_NOWAIT | __GFP_NOWARN);
		if (info->skb == NULL) {
			DMUX_LOG_KERR(
				"%s: unable to alloc skb, will retry later\n",
				__func__);
			goto fail_info;
		}
		ptr = skb_put(info->skb, BUFFER_SIZE);

		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
							DMA_FROM_DEVICE);
		if (info->dma_address == 0 || info->dma_address == ~0) {
			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
				__func__, (void *)info->dma_address, ptr);
			goto fail_skb;
		}

		mutex_lock(&bam_rx_pool_mutexlock);
		list_add_tail(&info->list_node, &bam_rx_pool);
		rx_len_cached = ++bam_rx_pool_len;
		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
					BUFFER_SIZE, info,
					SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
		if (ret) {
			list_del(&info->list_node);
			rx_len_cached = --bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			DMUX_LOG_KERR("%s: sps_transfer_one failed %d\n",
				__func__, ret);

			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
						DMA_FROM_DEVICE);

			goto fail_skb;
		}
		mutex_unlock(&bam_rx_pool_mutexlock);

	}
	return;

fail_skb:
	dev_kfree_skb_any(info->skb);

fail_info:
	kfree(info);

fail:
	if (rx_len_cached == 0 && !in_global_reset) {
		DMUX_LOG_KERR("%s: rescheduling\n", __func__);
		schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
	}
}

static void queue_rx_work_func(struct work_struct *work)
{
	queue_rx();
}

static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	unsigned long event_data;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;
	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);

	event_data = (unsigned long)(rx_skb);

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].notify)
		bam_ch[rx_hdr->ch_id].notify(
			bam_ch[rx_hdr->ch_id].priv, BAM_DMUX_RECEIVE,
							event_data);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}

static inline void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
{
	unsigned long flags;
	int ret;

	mutex_lock(&bam_pdev_mutexlock);
	if (in_global_reset) {
		bam_dmux_log("%s: open cid %d aborted due to ssr\n",
				__func__, rx_hdr->ch_id);
		mutex_unlock(&bam_pdev_mutexlock);
		queue_rx();
		return;
	}
	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
	bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
	ret = platform_device_add(bam_ch[rx_hdr->ch_id].pdev);
	if (ret)
		pr_err("%s: platform_device_add() error: %d\n",
				__func__, ret);
	mutex_unlock(&bam_pdev_mutexlock);
	queue_rx();
}

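/*
 * Work item executed for every received frame: unmaps the DMA buffer,
 * validates the mux header, then dispatches on cmd: data delivery,
 * channel open (with or without A2 power collapse), or channel close.
 * Malformed frames are logged and dropped.
 */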
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
	kfree(info);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
			" reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}

	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
				rx_hdr->ch_id);
		handle_bam_mux_cmd_open(rx_hdr);
		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
			bam_dmux_log("%s: deactivating disconnect ack\n",
								__func__);
			disconnect_ack = 0;
		}
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
				rx_hdr->ch_id);

		if (!a2_pc_disabled) {
			a2_pc_disabled = 1;
			ul_wakeup();
		}

		handle_bam_mux_cmd_open(rx_hdr);
		dev_kfree_skb_any(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		bam_dmux_log("%s: closing cid %d\n", __func__,
				rx_hdr->ch_id);
		mutex_lock(&bam_pdev_mutexlock);
		if (in_global_reset) {
			bam_dmux_log("%s: close cid %d aborted due to ssr\n",
					__func__, rx_hdr->ch_id);
			mutex_unlock(&bam_pdev_mutexlock);
			break;
		}
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		platform_device_unregister(bam_ch[rx_hdr->ch_id].pdev);
		bam_ch[rx_hdr->ch_id].pdev =
			platform_device_alloc(bam_ch[rx_hdr->ch_id].name, 2);
		if (!bam_ch[rx_hdr->ch_id].pdev)
			pr_err("%s: platform_device_alloc failed\n", __func__);
		mutex_unlock(&bam_pdev_mutexlock);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
			" reserved %d cmd %d pad %d ch %d len %d\n",
			__func__, rx_hdr->magic_num, rx_hdr->reserved,
			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
			rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}

static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;
	unsigned long flags;

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);
		rc = -ENOMEM;
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->len,
					DMA_TO_DEVICE);
		kfree(pkt);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
	}

	ul_packet_written = 1;
	return rc;
}

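/*
 * TX completion work.  Completions must arrive in submission order, so
 * the finished packet has to be the current head of bam_tx_pool; any
 * mismatch indicates list corruption and is treated as fatal (BUG).
 */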
static void bam_mux_write_done(struct work_struct *work)
{
	struct sk_buff *skb;
	struct bam_mux_hdr *hdr;
	struct tx_pkt_info *info;
	struct tx_pkt_info *info_expected;
	unsigned long event_data;
	unsigned long flags;

	if (in_global_reset)
		return;

	info = container_of(work, struct tx_pkt_info, work);

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	info_expected = list_first_entry(&bam_tx_pool,
			struct tx_pkt_info, list_node);
	if (unlikely(info != info_expected)) {
		struct tx_pkt_info *errant_pkt;

		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
				" list_node=%p, ts=%u.%09lu\n",
				__func__, bam_tx_pool.next, &info->list_node,
				info->ts_sec, info->ts_nsec
				);

		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
				&errant_pkt->list_node, errant_pkt->ts_sec,
				errant_pkt->ts_nsec);

		}
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		BUG();
	}
	list_del(&info->list_node);
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	if (info->is_cmd) {
		kfree(info->skb);
		kfree(info);
		return;
	}
	skb = info->skb;
	kfree(info);
	hdr = (struct bam_mux_hdr *)skb->data;
	DBG_INC_WRITE_CNT(skb->len);
	event_data = (unsigned long)(skb);
	spin_lock_irqsave(&bam_ch[hdr->ch_id].lock, flags);
	bam_ch[hdr->ch_id].num_tx_pkts--;
	spin_unlock_irqrestore(&bam_ch[hdr->ch_id].lock, flags);
	if (bam_ch[hdr->ch_id].notify)
		bam_ch[hdr->ch_id].notify(
			bam_ch[hdr->ch_id].priv, BAM_DMUX_WRITE_DONE,
							event_data);
	else
		dev_kfree_skb_any(skb);
}

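/*
 * Queue an skb for transmission on channel id: expand the skb if it
 * lacks tailroom for 4-byte alignment padding, push a bam_mux_hdr in
 * front of the payload, and hand the frame to the BAM TX pipe.  Returns
 * -EAGAIN when watermarks are in use and the channel is at or above
 * HIGH_WATERMARK.
 */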
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}

	if (bam_ch[id].use_wm &&
	    (bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	/* if the skb does not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb and memcpy are probably more
		   efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			goto write_fail;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		goto write_fail2;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		goto write_fail3;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	set_tx_timestamp(pkt);
	INIT_WORK(&pkt->work, bam_mux_write_done);
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	list_add_tail(&pkt->list_node, &bam_tx_pool);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (rc) {
		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
			__func__, rc);
		list_del(&pkt->list_node);
		DBG_INC_TX_SPS_FAILURE_CNT();
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		dma_unmap_single(NULL, pkt->dma_address,
					pkt->skb->len, DMA_TO_DEVICE);
		kfree(pkt);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
	} else {
		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
		spin_lock_irqsave(&bam_ch[id].lock, flags);
		bam_ch[id].num_tx_pkts++;
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
	}
	ul_packet_written = 1;
	read_unlock(&ul_wakeup_lock);
	return rc;

write_fail3:
	kfree(pkt);
write_fail2:
	skb_pull(skb, sizeof(struct bam_mux_hdr));
	if (new_skb)
		dev_kfree_skb_any(new_skb);
write_fail:
	read_unlock(&ul_wakeup_lock);
	return -ENOMEM;
}

int msm_bam_dmux_open(uint32_t id, void *priv,
			void (*notify)(void *, int, unsigned long))
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc = 0;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!bam_mux_initialized) {
		DBG("%s: not initialized\n", __func__);
		return -ENODEV;
	}
	if (id >= BAM_DMUX_NUM_CHANNELS) {
		pr_err("%s: invalid channel id %d\n", __func__, id);
		return -EINVAL;
	}
	if (notify == NULL) {
		pr_err("%s: notify function is NULL\n", __func__);
		return -EINVAL;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		return -ENOMEM;
	}
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (bam_ch_is_open(id)) {
		DBG("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		goto open_done;
	}
	if (!bam_ch_is_remote_open(id)) {
		DBG("%s: Remote not open; ch: %d\n", __func__, id);
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		kfree(hdr);
		return -ENODEV;
	}

	bam_ch[id].notify = notify;
	bam_ch[id].priv = priv;
	bam_ch[id].status |= BAM_CH_LOCAL_OPEN;
	bam_ch[id].num_tx_pkts = 0;
	bam_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

open_done:
	DBG("%s: opened ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_close(uint32_t id)
{
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	int rc;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!bam_mux_initialized)
		return -ENODEV;

	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected && !bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return -EFAULT;
		read_lock(&ul_wakeup_lock);
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].notify = NULL;
	bam_ch[id].priv = NULL;
	bam_ch[id].status &= ~BAM_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	if (bam_ch_is_in_reset(id)) {
		read_unlock(&ul_wakeup_lock);
		bam_ch[id].status &= ~BAM_CH_IN_RESET;
		return 0;
	}

	hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
	if (hdr == NULL) {
		pr_err("%s: hdr kmalloc failed. ch: %d\n", __func__, id);
		read_unlock(&ul_wakeup_lock);
		return -ENOMEM;
	}
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = 0;
	hdr->pad_len = 0;

	rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
	read_unlock(&ul_wakeup_lock);

	DBG("%s: closed ch %d\n", __func__, id);
	return rc;
}

int msm_bam_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

int msm_bam_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&bam_ch[id].lock, flags);
	bam_ch[id].use_wm = 1;
	ret = bam_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, bam_ch[id].num_tx_pkts, ret);
	if (!bam_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	return ret;
}

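/*
 * Leave polling mode: re-arm the EOT interrupt on the RX pipe, then
 * drain any descriptors that completed in the window before interrupts
 * were re-enabled so no packet is left stranded in the pipe.
 */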
static void rx_switch_to_interrupt_mode(void)
{
	struct sps_connect cur_rx_conn;
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int ret;

	/*
	 * Attempt to enable interrupts - if this fails,
	 * continue polling and we will retry later.
	 */
	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
		goto fail;
	}

	rx_register_event.options = SPS_O_EOT;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret) {
		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
		goto fail;
	}

	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
	if (ret) {
		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
		goto fail;
	}
	polling_mode = 0;
	release_wakelock();

	/* handle any rx packets before interrupt was enabled */
	while (bam_connection_is_active && !polling_mode) {
		ret = sps_get_iovec(bam_rx_pipe, &iov);
		if (ret) {
			pr_err("%s: sps_get_iovec failed %d\n",
					__func__, ret);
			break;
		}
		if (iov.addr == 0)
			break;

		mutex_lock(&bam_rx_pool_mutexlock);
		if (unlikely(list_empty(&bam_rx_pool))) {
			DMUX_LOG_KERR("%s: have iovec %p but rx pool empty\n",
				__func__, (void *)iov.addr);
			mutex_unlock(&bam_rx_pool_mutexlock);
			continue;
		}
		info = list_first_entry(&bam_rx_pool, struct rx_pkt_info,
							list_node);
		if (info->dma_address != iov.addr) {
			DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
				__func__,
				(void *)iov.addr,
				(void *)info->dma_address);
			list_for_each_entry(info, &bam_rx_pool, list_node) {
				DMUX_LOG_KERR("%s: dma %p\n", __func__,
					(void *)info->dma_address);
				if (iov.addr == info->dma_address)
					break;
			}
		}
		BUG_ON(info->dma_address != iov.addr);
		list_del(&info->list_node);
		--bam_rx_pool_len;
		mutex_unlock(&bam_rx_pool_mutexlock);
		handle_bam_mux_cmd(&info->work);
	}
	return;

fail:
	pr_err("%s: reverting to polling\n", __func__);
	queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
}

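/*
 * Polling-mode RX loop.  The inner loop drains the pipe until empty;
 * after POLLING_INACTIVITY consecutive empty passes the driver drops
 * back to interrupt mode.  With the adaptive timer enabled, the sleep
 * interval shrinks as pipe occupancy grows and is clamped to
 * [MIN_POLLING_SLEEP, MAX_POLLING_SLEEP] microseconds.
 */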
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct rx_pkt_info *info;
	int inactive_cycles = 0;
	int ret;
	u32 buffs_unused, buffs_used;

	while (bam_connection_is_active) { /* timer loop */
		++inactive_cycles;
		while (bam_connection_is_active) { /* deplete queue loop */
			if (in_global_reset)
				return;

			ret = sps_get_iovec(bam_rx_pipe, &iov);
			if (ret) {
				pr_err("%s: sps_get_iovec failed %d\n",
						__func__, ret);
				break;
			}
			if (iov.addr == 0)
				break;
			inactive_cycles = 0;
			mutex_lock(&bam_rx_pool_mutexlock);
			if (unlikely(list_empty(&bam_rx_pool))) {
				DMUX_LOG_KERR(
					"%s: have iovec %p but rx pool empty\n",
					__func__, (void *)iov.addr);
				mutex_unlock(&bam_rx_pool_mutexlock);
				continue;
			}
			info = list_first_entry(&bam_rx_pool,
					struct rx_pkt_info, list_node);
			if (info->dma_address != iov.addr) {
				DMUX_LOG_KERR("%s: iovec %p != dma %p\n",
					__func__,
					(void *)iov.addr,
					(void *)info->dma_address);
				list_for_each_entry(info, &bam_rx_pool,
						list_node) {
					DMUX_LOG_KERR("%s: dma %p\n", __func__,
						(void *)info->dma_address);
					if (iov.addr == info->dma_address)
						break;
				}
			}
			BUG_ON(info->dma_address != iov.addr);
			list_del(&info->list_node);
			--bam_rx_pool_len;
			mutex_unlock(&bam_rx_pool_mutexlock);
			handle_bam_mux_cmd(&info->work);
		}

		if (inactive_cycles >= POLLING_INACTIVITY) {
			rx_switch_to_interrupt_mode();
			break;
		}

		if (bam_adaptive_timer_enabled) {
			usleep_range(rx_timer_interval,
					rx_timer_interval + 50);

			ret = sps_get_unused_desc_num(bam_rx_pipe,
						&buffs_unused);

			if (ret) {
				pr_err("%s: error getting num buffers unused after sleep\n",
					__func__);

				break;
			}

			buffs_used = NUM_BUFFERS - buffs_unused;

			if (buffs_unused == 0) {
				rx_timer_interval = MIN_POLLING_SLEEP;
			} else {
				if (buffs_used > 0) {
					rx_timer_interval =
						(2 * NUM_BUFFERS *
							rx_timer_interval)/
						(3 * buffs_used);
				} else {
					rx_timer_interval =
						MAX_POLLING_SLEEP;
				}
			}

			if (rx_timer_interval > MAX_POLLING_SLEEP)
				rx_timer_interval = MAX_POLLING_SLEEP;
			else if (rx_timer_interval < MIN_POLLING_SLEEP)
				rx_timer_interval = MIN_POLLING_SLEEP;
		} else {
			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
		}
	}
}

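/*
 * SPS callback for the TX pipe: on EOT, unmap the finished buffer and
 * defer completion processing to the TX workqueue.
 */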
static void bam_mux_tx_notify(struct sps_event_notify *notify)
{
	struct tx_pkt_info *pkt;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		pkt = notify->data.transfer.user;
		if (!pkt->is_cmd)
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->skb->len,
						DMA_TO_DEVICE);
		else
			dma_unmap_single(NULL, pkt->dma_address,
						pkt->len,
						DMA_TO_DEVICE);
		queue_work(bam_mux_tx_workqueue, &pkt->work);
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

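/*
 * SPS callback for the RX pipe: on the first EOT, switch the pipe to
 * polling mode (interrupts masked) and kick rx_timer_work on core 0.
 */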
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	int ret;
	struct sps_connect cur_rx_conn;

	DBG("%s: event %d notified\n", __func__, notify->event_id);

	if (in_global_reset)
		return;

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* attempt to disable interrupts in this pipe */
		if (!polling_mode) {
			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_get_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
			if (ret) {
				pr_err("%s: sps_set_config() failed %d, interrupts"
					" not disabled\n", __func__, ret);
				break;
			}
			grab_wakelock();
			polling_mode = 1;
			/*
			 * run on core 0 so that netif_rx() in rmnet uses only
			 * one queue
			 */
			queue_work_on(0, bam_mux_rx_workqueue, &rx_timer_work);
		}
		break;
	default:
		pr_err("%s: received unexpected event id %d\n", __func__,
			notify->event_id);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < BAM_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			"ch%02d local open=%s remote open=%s\n",
			j, bam_ch_is_local_open(j) ? "Y" : "N",
			bam_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

static int debug_ul_pkt_cnt(char *buf, int max)
{
	struct list_head *p;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	__list_for_each(p, &bam_tx_pool) {
		++n;
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	return scnprintf(buf, max, "Number of UL packets in flight: %d\n", n);
}

static int debug_stats(char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i,
			"skb read cnt: %u\n"
			"skb write cnt: %u\n"
			"skb copy cnt: %u\n"
			"skb copy bytes: %u\n"
			"sps tx failures: %u\n"
			"sps tx stalls: %u\n"
			"rx queue len: %d\n"
			"a2 ack out cnt: %d\n"
			"a2 ack in cnt: %d\n"
			"a2 pwr cntl in: %d\n",
			bam_dmux_read_cnt,
			bam_dmux_write_cnt,
			bam_dmux_write_cpy_cnt,
			bam_dmux_write_cpy_bytes,
			bam_dmux_tx_sps_failure_cnt,
			bam_dmux_tx_stall_cnt,
			bam_rx_pool_len,
			atomic_read(&bam_dmux_ack_out_cnt),
			atomic_read(&bam_dmux_ack_in_cnt),
			atomic_read(&bam_dmux_a2_pwr_cntl_in_cnt)
			);

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
				struct dentry *dent,
				int (*fill)(char *buf, int max))
{
	struct dentry *file;

	file = debugfs_create_file(name, mode, dent, fill, &debug_ops);
	if (IS_ERR(file))
		pr_err("%s: debugfs create failed %d\n", __func__,
				(int)PTR_ERR(file));
}

#endif

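/*
 * Broadcasts a power event to every open DMUX channel and to every
 * listener registered through msm_bam_dmux_reg_notify().
 */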
static void notify_all(int event, unsigned long data)
{
	int i;
	struct list_head *temp;
	struct outside_notify_func *func;

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		if (bam_ch_is_open(i)) {
			bam_ch[i].notify(bam_ch[i].priv, event, data);
			bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
					__func__, i, event, data);
		}
	}

	__list_for_each(temp, &bam_other_notify_funcs) {
		func = container_of(temp, struct outside_notify_func,
					list_node);
		func->notify(func->priv, event, data);
	}
}

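/*
 * Workqueue wrapper for ul_wakeup(). The wakeup handshake sleeps, so it
 * cannot run in the contexts that request it; once the wakeup completes,
 * clients are notified of BAM_DMUX_UL_CONNECTED.
 */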
static void kickoff_ul_wakeup_func(struct work_struct *work)
{
	read_lock(&ul_wakeup_lock);
	if (!bam_is_connected) {
		read_unlock(&ul_wakeup_lock);
		ul_wakeup();
		if (unlikely(in_global_reset == 1))
			return;
		read_lock(&ul_wakeup_lock);
		ul_packet_written = 1;
		notify_all(BAM_DMUX_UL_CONNECTED, (unsigned long)(NULL));
	}
	read_unlock(&ul_wakeup_lock);
}

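/*
 * Marks UL activity and, if the link is down, schedules the wakeup
 * worker.
 *
 * @returns 1 if the uplink is already connected, 0 if a wakeup was queued
 */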
int msm_bam_dmux_kickoff_ul_wakeup(void)
{
	int is_connected;

	read_lock(&ul_wakeup_lock);
	ul_packet_written = 1;
	is_connected = bam_is_connected;
	if (!is_connected)
		queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
	read_unlock(&ul_wakeup_lock);

	return is_connected;
}

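/*
 * Asserts or clears the apps SMSM_A2_POWER_CONTROL bit to vote for (1)
 * or against (0) A2 power, logging duplicate votes.
 */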
static void power_vote(int vote)
{
	bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
			bam_dmux_uplink_vote, vote);

	if (bam_dmux_uplink_vote == vote)
		bam_dmux_log("%s: warning - duplicate power vote\n", __func__);

	bam_dmux_uplink_vote = vote;
	if (vote)
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
	else
		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
}

/*
 * @note: Must be called with ul_wakeup_lock locked.
 */
static inline void ul_powerdown(void)
{
	bam_dmux_log("%s: powerdown\n", __func__);
	verify_tx_queue_is_empty(__func__);

	if (a2_pc_disabled) {
		wait_for_dfab = 1;
		INIT_COMPLETION(dfab_unvote_completion);
		release_wakelock();
	} else {
		wait_for_ack = 1;
		INIT_COMPLETION(ul_wakeup_ack_completion);
		power_vote(0);
	}
	bam_is_connected = 0;
	notify_all(BAM_DMUX_UL_DISCONNECTED, (unsigned long)(NULL));
}

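/*
 * Finishes a powerdown started by ul_powerdown(); in the a2_pc_disabled
 * case this drops the dfab vote and signals dfab_unvote_completion.
 * Callers in this file invoke it only after releasing ul_wakeup_lock.
 */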
static inline void ul_powerdown_finish(void)
{
	if (a2_pc_disabled && wait_for_dfab) {
		unvote_dfab();
		complete_all(&dfab_unvote_completion);
		wait_for_dfab = 0;
	}
}

/*
 * Votes for UL power and returns current power state.
 *
 * @returns true if currently connected
 */
int msm_bam_dmux_ul_power_vote(void)
{
	int is_connected;

	read_lock(&ul_wakeup_lock);
	atomic_inc(&ul_ondemand_vote);
	is_connected = bam_is_connected;
	if (!is_connected)
		queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
	read_unlock(&ul_wakeup_lock);

	return is_connected;
}

/*
 * Unvotes for UL power.
 *
 * @returns true if vote count is 0 (UL shutdown possible)
 */
int msm_bam_dmux_ul_power_unvote(void)
{
	int vote;

	read_lock(&ul_wakeup_lock);
	vote = atomic_dec_return(&ul_ondemand_vote);
	if (unlikely(vote < 0))
		DMUX_LOG_KERR("%s: invalid power vote %d\n", __func__, vote);
	read_unlock(&ul_wakeup_lock);

	return vote == 0;
}

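/*
 * Registers a callback for BAM DMUX power events (BAM_DMUX_UL_CONNECTED /
 * BAM_DMUX_UL_DISCONNECTED) from clients that are not DMUX channels.
 * A hypothetical client might use it like this (illustrative sketch only;
 * "my_priv" and "my_power_cb" are not part of this driver):
 *
 *	static void my_power_cb(void *priv, int event, unsigned long data)
 *	{
 *		if (event == BAM_DMUX_UL_CONNECTED)
 *			resume_my_tx(priv);
 *	}
 *	...
 *	rc = msm_bam_dmux_reg_notify(my_priv, my_power_cb);
 *
 * @returns 0 on success, -EINVAL for a NULL callback, -ENOMEM on
 * allocation failure
 */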
int msm_bam_dmux_reg_notify(void *priv,
			void (*notify)(void *priv, int event_type,
						unsigned long data))
{
	struct outside_notify_func *func;

	if (!notify)
		return -EINVAL;

	func = kmalloc(sizeof(struct outside_notify_func), GFP_KERNEL);
	if (!func)
		return -ENOMEM;

	func->notify = notify;
	func->priv = priv;
	list_add(&func->list_node, &bam_other_notify_funcs);

	return 0;
}

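/*
 * Delayed-work handler that powers the uplink down after a period of
 * inactivity. If packets were written (or on-demand votes are held) the
 * work is rescheduled; otherwise ul_powerdown() runs. A stalled TX packet
 * left in bam_tx_pool also counts as activity and is logged.
 */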
static void ul_timeout(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	if (in_global_reset)
		return;
	ret = write_trylock_irqsave(&ul_wakeup_lock, flags);
	if (!ret) { /* failed to grab lock, reschedule and bail */
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		return;
	}
	if (bam_is_connected) {
		if (!ul_packet_written) {
			spin_lock(&bam_tx_pool_spinlock);
			if (!list_empty(&bam_tx_pool)) {
				struct tx_pkt_info *info;

				info = list_first_entry(&bam_tx_pool,
						struct tx_pkt_info, list_node);
				DMUX_LOG_KERR("%s: UL delayed ts=%u.%09lu\n",
					__func__, info->ts_sec, info->ts_nsec);
				DBG_INC_TX_STALL_CNT();
				ul_packet_written = 1;
			}
			spin_unlock(&bam_tx_pool_spinlock);
		}

		if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
			bam_dmux_log("%s: pkt written %d\n",
				__func__, ul_packet_written);
			ul_packet_written = 0;
			schedule_delayed_work(&ul_timeout_work,
					msecs_to_jiffies(UL_TIMEOUT_DELAY));
		} else {
			ul_powerdown();
		}
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
}

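/*
 * Handles a modem-handshake timeout: flags global reset, warns if
 * subsystem restart is not enabled, and returns 1 so callers abort the
 * in-progress wakeup.
 */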
static int ssrestart_check(void)
{
	DMUX_LOG_KERR("%s: modem timeout: BAM DMUX disabled\n", __func__);
	in_global_reset = 1;
	if (get_restart_level() <= RESET_SOC)
		DMUX_LOG_KERR("%s: ssrestart not enabled\n", __func__);
	return 1;
}

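/*
 * Brings the uplink out of power collapse. Handles the early-SSR and
 * pre-init corner cases and the a2_pc_disabled path, then performs the
 * normal handshake: wait for any outstanding powerdown ack, vote for
 * power and wait for the wakeup ack, and finally wait for the BAM
 * connection to complete. Each wait times out after HZ and defers to
 * ssrestart_check().
 */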
static void ul_wakeup(void)
{
	int ret;
	int do_vote_dfab = 0;

	mutex_lock(&wakeup_lock);
	if (bam_is_connected) { /* bam got connected before lock grabbed */
		bam_dmux_log("%s Already awake\n", __func__);
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * if this gets hit, that means restart_notifier_cb() has started
	 * but probably not finished, thus we know SSR has happened, but
	 * haven't been able to send that info to our clients yet.
	 * in that case, abort the ul_wakeup() so that we don't undo any
	 * work restart_notifier_cb() has done. The clients will be notified
	 * shortly. No cleanup necessary (reschedule the wakeup) as our and
	 * their SSR handling will cover it
	 */
	if (unlikely(in_global_reset == 1)) {
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * if someone is voting for UL before bam is inited (modem up first
	 * time), set flag for init to kickoff ul wakeup once bam is inited
	 */
	mutex_lock(&delayed_ul_vote_lock);
	if (unlikely(!bam_mux_initialized)) {
		need_delayed_ul_vote = 1;
		mutex_unlock(&delayed_ul_vote_lock);
		mutex_unlock(&wakeup_lock);
		return;
	}
	mutex_unlock(&delayed_ul_vote_lock);

	if (a2_pc_disabled) {
		/*
		 * don't grab the wakelock the first time because it is
		 * already grabbed when a2 powers on
		 */
		if (likely(a2_pc_disabled_wakelock_skipped)) {
			grab_wakelock();
			do_vote_dfab = 1; /* vote must occur after wait */
		} else {
			a2_pc_disabled_wakelock_skipped = 1;
		}
		if (wait_for_dfab) {
			ret = wait_for_completion_timeout(
					&dfab_unvote_completion, HZ);
			BUG_ON(ret == 0);
		}
		if (likely(do_vote_dfab))
			vote_dfab();
		schedule_delayed_work(&ul_timeout_work,
				msecs_to_jiffies(UL_TIMEOUT_DELAY));
		bam_is_connected = 1;
		mutex_unlock(&wakeup_lock);
		return;
	}

	/*
	 * must wait for the previous power down request to have been acked
	 * chances are it already came in and this will just fall through
	 * instead of waiting
	 */
	if (wait_for_ack) {
		bam_dmux_log("%s waiting for previous ack\n", __func__);
		ret = wait_for_completion_timeout(
					&ul_wakeup_ack_completion, HZ);
		wait_for_ack = 0;
		if (unlikely(ret == 0) && ssrestart_check()) {
			mutex_unlock(&wakeup_lock);
			bam_dmux_log("%s timeout previous ack\n", __func__);
			return;
		}
	}
	INIT_COMPLETION(ul_wakeup_ack_completion);
	power_vote(1);
	bam_dmux_log("%s waiting for wakeup ack\n", __func__);
	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
	if (unlikely(ret == 0) && ssrestart_check()) {
		mutex_unlock(&wakeup_lock);
		bam_dmux_log("%s timeout wakeup ack\n", __func__);
		return;
	}
	bam_dmux_log("%s waiting completion\n", __func__);
	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
	if (unlikely(ret == 0) && ssrestart_check()) {
		mutex_unlock(&wakeup_lock);
		bam_dmux_log("%s timeout power on\n", __func__);
		return;
	}

	bam_is_connected = 1;
	bam_dmux_log("%s complete\n", __func__);
	schedule_delayed_work(&ul_timeout_work,
			msecs_to_jiffies(UL_TIMEOUT_DELAY));
	mutex_unlock(&wakeup_lock);
}

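/*
 * Re-establishes the BAM connection after the modem powers the A2 back
 * up: clears the reset/SSR flags, performs any sps disconnect that was
 * skipped during SSR, resets the device, reconnects both pipes, and
 * re-registers the pipe event callbacks before acking the modem.
 */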
static void reconnect_to_bam(void)
{
	int i;

	in_global_reset = 0;
	in_ssr = 0;
	vote_dfab();
	if (!power_management_only_mode) {
		if (ssr_skipped_disconnect) {
			/* delayed to here to prevent bus stall */
			sps_disconnect(bam_tx_pipe);
			sps_disconnect(bam_rx_pipe);
			__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
			__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
		}
		ssr_skipped_disconnect = 0;
		i = sps_device_reset(a2_device_handle);
		if (i)
			pr_err("%s: device reset failed rc = %d\n", __func__,
					i);
		i = sps_connect(bam_tx_pipe, &tx_connection);
		if (i)
			pr_err("%s: tx connection failed rc = %d\n", __func__,
					i);
		i = sps_connect(bam_rx_pipe, &rx_connection);
		if (i)
			pr_err("%s: rx connection failed rc = %d\n", __func__,
					i);
		i = sps_register_event(bam_tx_pipe, &tx_register_event);
		if (i)
			pr_err("%s: tx event reg failed rc = %d\n", __func__,
					i);
		i = sps_register_event(bam_rx_pipe, &rx_register_event);
		if (i)
			pr_err("%s: rx event reg failed rc = %d\n", __func__,
					i);
	}

	bam_connection_is_active = 1;

	if (polling_mode)
		rx_switch_to_interrupt_mode();

	toggle_apps_ack();
	complete_all(&bam_connection_completion);
	if (!power_management_only_mode)
		queue_rx();
}

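/*
 * Tears the BAM connection down when the modem votes power off: forces
 * an uplink powerdown if one is active, disconnects the pipes (unless
 * SSR is in progress, in which case the disconnect is deferred to
 * reconnect_to_bam()), and frees every queued RX buffer.
 */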
static void disconnect_to_bam(void)
{
	struct list_head *node;
	struct rx_pkt_info *info;
	unsigned long flags;

	bam_connection_is_active = 0;

	/* handle disconnect during active UL */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
		ul_powerdown();
	}
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();

	/* tear down BAM connection */
	INIT_COMPLETION(bam_connection_completion);

	/* in_ssr documentation/assumptions found in restart_notifier_cb */
	if (!power_management_only_mode) {
		if (likely(!in_ssr)) {
			sps_disconnect(bam_tx_pipe);
			sps_disconnect(bam_rx_pipe);
			__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
			__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
			sps_device_reset(a2_device_handle);
		} else {
			ssr_skipped_disconnect = 1;
		}
	}
	unvote_dfab();

	mutex_lock(&bam_rx_pool_mutexlock);
	while (!list_empty(&bam_rx_pool)) {
		node = bam_rx_pool.next;
		list_del(node);
		info = container_of(node, struct rx_pkt_info, list_node);
		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
				DMA_FROM_DEVICE);
		dev_kfree_skb_any(info->skb);
		kfree(info);
	}
	bam_rx_pool_len = 0;
	mutex_unlock(&bam_rx_pool_mutexlock);

	if (disconnect_ack)
		toggle_apps_ack();

	verify_tx_queue_is_empty(__func__);
}

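/*
 * Enables the dfab (and, when present, XO) clocks needed while the BAM
 * link is up. Voting is idempotent: a second vote while dfab is already
 * on is only logged.
 */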
static void vote_dfab(void)
{
	int rc;

	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (dfab_is_on) {
		bam_dmux_log("%s: dfab is already on\n", __func__);
		mutex_unlock(&dfab_status_lock);
		return;
	}
	if (dfab_clk) {
		rc = clk_prepare_enable(dfab_clk);
		if (rc)
			DMUX_LOG_KERR("bam_dmux vote for dfab failed rc = %d\n",
					rc);
	}
	if (xo_clk) {
		rc = clk_prepare_enable(xo_clk);
		if (rc)
			DMUX_LOG_KERR("bam_dmux vote for xo failed rc = %d\n",
					rc);
	}
	dfab_is_on = 1;
	mutex_unlock(&dfab_status_lock);
}

static void unvote_dfab(void)
{
	bam_dmux_log("%s\n", __func__);
	mutex_lock(&dfab_status_lock);
	if (!dfab_is_on) {
		DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
		dump_stack();
		mutex_unlock(&dfab_status_lock);
		return;
	}
	if (dfab_clk)
		clk_disable_unprepare(dfab_clk);
	if (xo_clk)
		clk_disable_unprepare(xo_clk);
	dfab_is_on = 0;
	mutex_unlock(&dfab_status_lock);
}

/* reference counting wrapper around wakelock */
static void grab_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	bam_dmux_log("%s: ref count = %d\n", __func__,
			wakelock_reference_count);
	if (wakelock_reference_count == 0)
		wake_lock(&bam_wakelock);
	++wakelock_reference_count;
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

static void release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakelock_reference_lock, flags);
	if (wakelock_reference_count == 0) {
		DMUX_LOG_KERR("%s: bam_dmux wakelock not locked\n", __func__);
		dump_stack();
		spin_unlock_irqrestore(&wakelock_reference_lock, flags);
		return;
	}
	bam_dmux_log("%s: ref count = %d\n", __func__,
			wakelock_reference_count);
	--wakelock_reference_count;
	if (wakelock_reference_count == 0)
		wake_unlock(&bam_wakelock);
	spin_unlock_irqrestore(&wakelock_reference_lock, flags);
}

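/*
 * Subsystem-restart notifier for the modem. BEFORE_SHUTDOWN marks the
 * driver as being in SSR; AFTER_SHUTDOWN performs the actual cleanup of
 * power votes, channel state, and pending UL packets. See the comment
 * below for the ordering assumptions this relies on.
 */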
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	int i;
	struct list_head *node;
	struct tx_pkt_info *info;
	int temp_remote_status;
	unsigned long flags;

	/*
	 * Bam_dmux counts on the fact that the BEFORE_SHUTDOWN level of
	 * notifications are guaranteed to execute before the AFTER_SHUTDOWN
	 * level of notifications, and that BEFORE_SHUTDOWN always occurs in
	 * all SSR events, no matter what triggered the SSR. Also, bam_dmux
	 * assumes that SMD does its SSR processing in the AFTER_SHUTDOWN
	 * level, thus bam_dmux is guaranteed to detect SSR before SMD, since
	 * the callbacks for all the drivers within the AFTER_SHUTDOWN level
	 * could occur in any order. Bam_dmux uses this knowledge to skip
	 * accessing the bam hardware when disconnect_to_bam() is triggered
	 * by SMD's SSR processing. We do not want to access the bam hardware
	 * during SSR because a watchdog crash from a bus stall would likely
	 * occur.
	 */
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		in_global_reset = 1;
		in_ssr = 1;
		bam_dmux_log("%s: begin\n", __func__);
		flush_workqueue(bam_mux_rx_workqueue);
	}
	if (code != SUBSYS_AFTER_SHUTDOWN)
		return NOTIFY_DONE;

	/* Handle uplink Powerdown */
	write_lock_irqsave(&ul_wakeup_lock, flags);
	if (bam_is_connected) {
		ul_powerdown();
		wait_for_ack = 0;
	}
	/*
	 * if modem crash during ul_wakeup(), power_vote is 1, needs to be
	 * reset to 0. harmless if bam_is_connected check above passes
	 */
	power_vote(0);
	write_unlock_irqrestore(&ul_wakeup_lock, flags);
	ul_powerdown_finish();
	a2_pc_disabled = 0;
	a2_pc_disabled_wakelock_skipped = 0;
	disconnect_ack = 1;

	/* Cleanup Channel States */
	mutex_lock(&bam_pdev_mutexlock);
	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		temp_remote_status = bam_ch_is_remote_open(i);
		bam_ch[i].status &= ~BAM_CH_REMOTE_OPEN;
		bam_ch[i].num_tx_pkts = 0;
		if (bam_ch_is_local_open(i))
			bam_ch[i].status |= BAM_CH_IN_RESET;
		if (temp_remote_status) {
			platform_device_unregister(bam_ch[i].pdev);
			bam_ch[i].pdev = platform_device_alloc(
						bam_ch[i].name, 2);
		}
	}
	mutex_unlock(&bam_pdev_mutexlock);

	/* Cleanup pending UL data */
	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
	while (!list_empty(&bam_tx_pool)) {
		node = bam_tx_pool.next;
		list_del(node);
		info = container_of(node, struct tx_pkt_info,
					list_node);
		if (!info->is_cmd) {
			dma_unmap_single(NULL, info->dma_address,
					info->skb->len,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(info->skb);
		} else {
			dma_unmap_single(NULL, info->dma_address,
					info->len,
					DMA_TO_DEVICE);
			kfree(info->skb);
		}
		kfree(info);
	}
	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);

	bam_dmux_log("%s: complete\n", __func__);
	return NOTIFY_DONE;
}

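/*
 * Maps the A2 BAM, registers it with the sps driver, and brings up the
 * TX and RX pipes with their descriptor FIFOs and event callbacks. On
 * success the mux is marked initialized, any UL vote that arrived early
 * is kicked off, and the modem is acked. The error paths unwind in
 * reverse order of setup.
 */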
static int bam_init(void)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;
	int skip_iounmap = 0;

	vote_dfab();
	/* init BAM */
	a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
							a2_phys_size);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = (u32)(a2_phys_base);
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = a2_phys_size;
	a2_props.irq = a2_bam_irq;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615() || satellite_mode)
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_alloc_endpoint_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_get_config_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto rx_alloc_endpoint_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
				SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	mutex_lock(&delayed_ul_vote_lock);
	bam_mux_initialized = 1;
	if (need_delayed_ul_vote) {
		need_delayed_ul_vote = 0;
		msm_bam_dmux_kickoff_ul_wakeup();
	}
	mutex_unlock(&delayed_ul_vote_lock);
	toggle_apps_ack();
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);
	queue_rx();
	return 0;

rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
rx_alloc_endpoint_failed:
	sps_disconnect(bam_tx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_alloc_endpoint_failed:
	sps_deregister_bam_device(h);
	/*
	 * sps_deregister_bam_device() calls iounmap. calling iounmap on the
	 * same handle below will cause a crash, so skip it if we've freed
	 * the handle here.
	 */
	skip_iounmap = 1;
register_bam_failed:
	if (!skip_iounmap)
		iounmap(a2_virt_addr);
ioremap_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	return ret;
}

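/*
 * Minimal fallback used when full pipe setup fails: registers the BAM
 * for power-management handshaking only and sets
 * power_management_only_mode, so no data pipes are connected.
 */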
static int bam_init_fallback(void)
{
	u32 h;
	int ret;
	void *a2_virt_addr;

	/* init BAM */
	a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
							a2_phys_size);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = (u32)(a2_phys_base);
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = a2_phys_size;
	a2_props.irq = a2_bam_irq;
	a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	if (cpu_is_msm9615() || satellite_mode)
		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}
	a2_device_handle = h;

	mutex_lock(&delayed_ul_vote_lock);
	bam_mux_initialized = 1;
	if (need_delayed_ul_vote) {
		need_delayed_ul_vote = 0;
		msm_bam_dmux_kickoff_ul_wakeup();
	}
	mutex_unlock(&delayed_ul_vote_lock);
	toggle_apps_ack();

	power_management_only_mode = 1;
	bam_connection_is_active = 1;
	complete_all(&bam_connection_completion);

	return 0;

register_bam_failed:
	iounmap(a2_virt_addr);
ioremap_failed:
	return ret;
}

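/*
 * On MSM9615, attempt the full init and drop to the power-management-only
 * fallback if it fails.
 */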
static void msm9615_bam_init(void)
{
	int ret = 0;

	ret = bam_init();
	if (ret) {
		ret = bam_init_fallback();
		if (ret)
			pr_err("%s: bam init fallback failed: %d",
					__func__, ret);
	}
}

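/*
 * Acks a modem power-control request by flipping the
 * SMSM_A2_POWER_CONTROL_ACK bit; each toggle serves as one ack.
 */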
static void toggle_apps_ack(void)
{
	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */

	bam_dmux_log("%s: apps ack %d->%d\n", __func__,
			clear_bit & 0x1, ~clear_bit & 0x1);
	smsm_change_state(SMSM_APPS_STATE,
			clear_bit & SMSM_A2_POWER_CONTROL_ACK,
			~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
	clear_bit = ~clear_bit;
	DBG_INC_ACK_OUT_CNT();
}

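/*
 * SMSM callback for modem power-control votes. Deduplicates repeated
 * notifications for the same state, then reconnects, disconnects, or
 * performs first-time init depending on the new state and whether the
 * mux has been initialized.
 */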
static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
{
	static int last_processed_state;

	mutex_lock(&smsm_cb_lock);
	bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
	DBG_INC_A2_POWER_CONTROL_IN_CNT();
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
		bam_dmux_log("%s: already processed this state\n", __func__);
		mutex_unlock(&smsm_cb_lock);
		return;
	}

	last_processed_state = new_state & SMSM_A2_POWER_CONTROL;

	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: reconnect\n", __func__);
		grab_wakelock();
		reconnect_to_bam();
	} else if (bam_mux_initialized &&
			!(new_state & SMSM_A2_POWER_CONTROL)) {
		bam_dmux_log("%s: disconnect\n", __func__);
		disconnect_to_bam();
		release_wakelock();
	} else if (new_state & SMSM_A2_POWER_CONTROL) {
		bam_dmux_log("%s: init\n", __func__);
		grab_wakelock();
		if (cpu_is_msm9615())
			msm9615_bam_init();
		else
			bam_init();
	} else {
		bam_dmux_log("%s: bad state change\n", __func__);
		pr_err("%s: unsupported state change\n", __func__);
	}
	mutex_unlock(&smsm_cb_lock);
}

static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
						uint32_t new_state)
{
	DBG_INC_ACK_IN_CNT();
	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
			new_state);
	complete_all(&ul_wakeup_ack_completion);
}

static int bam_dmux_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *r;

	DBG("%s probe called\n", __func__);
	if (bam_mux_initialized)
		return 0;

	if (pdev->dev.of_node) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!r) {
			pr_err("%s: reg field missing\n", __func__);
			return -ENODEV;
		}
		a2_phys_base = (void *)(r->start);
		a2_phys_size = (uint32_t)(resource_size(r));
		a2_bam_irq = platform_get_irq(pdev, 0);
		if (a2_bam_irq == -ENXIO) {
			pr_err("%s: irq field missing\n", __func__);
			return -ENODEV;
		}
		satellite_mode = of_property_read_bool(pdev->dev.of_node,
						"qcom,satellite-mode");

		DBG("%s: base:%p size:%x irq:%d satellite:%d\n", __func__,
							a2_phys_base,
							a2_phys_size,
							a2_bam_irq,
							satellite_mode);
	} else { /* fallback to default init data */
		a2_phys_base = (void *)(A2_PHYS_BASE);
		a2_phys_size = A2_PHYS_SIZE;
		a2_bam_irq = A2_BAM_IRQ;
	}

	xo_clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(xo_clk)) {
		bam_dmux_log("%s: did not get xo clock\n", __func__);
		xo_clk = NULL;
	}
	dfab_clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(dfab_clk)) {
		bam_dmux_log("%s: did not get dfab clock\n", __func__);
		dfab_clk = NULL;
	} else {
		rc = clk_set_rate(dfab_clk, 64000000);
		if (rc)
			pr_err("%s: unable to set dfab clock rate\n", __func__);
	}

	/*
	 * setup the workqueue so that it can be pinned to core 0 and not
	 * block the watchdog pet function, so that netif_rx() in rmnet
	 * only uses one queue.
	 */
	bam_mux_rx_workqueue = alloc_workqueue("bam_dmux_rx",
					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!bam_mux_rx_workqueue)
		return -ENOMEM;

	bam_mux_tx_workqueue = create_singlethread_workqueue("bam_dmux_tx");
	if (!bam_mux_tx_workqueue) {
		destroy_workqueue(bam_mux_rx_workqueue);
		return -ENOMEM;
	}

	for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc) {
		spin_lock_init(&bam_ch[rc].lock);
		scnprintf(bam_ch[rc].name, BAM_DMUX_CH_NAME_MAX_LEN,
				"bam_dmux_ch_%d", rc);
		/* bus 2, ie a2 stream 2 */
		bam_ch[rc].pdev = platform_device_alloc(bam_ch[rc].name, 2);
		if (!bam_ch[rc].pdev) {
			pr_err("%s: platform device alloc failed\n", __func__);
			destroy_workqueue(bam_mux_rx_workqueue);
			destroy_workqueue(bam_mux_tx_workqueue);
			return -ENOMEM;
		}
	}

	init_completion(&ul_wakeup_ack_completion);
	init_completion(&bam_connection_completion);
	init_completion(&dfab_unvote_completion);
	INIT_DELAYED_WORK(&ul_timeout_work, ul_timeout);
	INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		pr_err("%s: smsm cb register failed, rc: %d\n", __func__, rc);
		return -ENOMEM;
	}

	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
					bam_dmux_smsm_ack_cb, NULL);

	if (rc) {
		destroy_workqueue(bam_mux_rx_workqueue);
		destroy_workqueue(bam_mux_tx_workqueue);
		smsm_state_cb_deregister(SMSM_MODEM_STATE,
					SMSM_A2_POWER_CONTROL,
					bam_dmux_smsm_cb, NULL);
		pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
				rc);
		for (rc = 0; rc < BAM_DMUX_NUM_CHANNELS; ++rc)
			platform_device_put(bam_ch[rc].pdev);
		return -ENOMEM;
	}

	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));

	return 0;
}

static struct of_device_id msm_match_table[] = {
	{.compatible = "qcom,bam_dmux"},
	{},
};

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.driver = {
		.name = "BAM_RMNT",
		.owner = THIS_MODULE,
		.of_match_table = msm_match_table,
	},
};

static int __init bam_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("bam_dmux", 0);
	if (!IS_ERR(dent)) {
		debug_create("tbl", 0444, dent, debug_tbl);
		debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
		debug_create("stats", 0444, dent, debug_stats);
	}
#endif

	bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux");
	if (!bam_ipc_log_txt) {
		pr_err("%s : unable to create IPC Logging Context", __func__);
		bam_dmux_state_logging_disabled = 1;
	}

	rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;

	subsys_notif_register_notifier("modem", &restart_notifier);
	return platform_driver_register(&bam_dmux_driver);
}

late_initcall(bam_dmux_init); /* needs to init after SMD */
MODULE_DESCRIPTION("MSM BAM DMUX");
MODULE_LICENSE("GPL v2");