/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define DEBUG

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/debugfs.h>
#include <linux/moduleparam.h>

#include <mach/sdio_al.h>
#include <mach/sdio_cmux.h>

#include "modem_notifier.h"

#define MAX_WRITE_RETRY 5
#define MAGIC_NO_V1 0x33FC

static int msm_sdio_cmux_debug_mask;
module_param_named(debug_mask, msm_sdio_cmux_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

enum cmd_type {
        DATA = 0,
        OPEN,
        CLOSE,
        STATUS,
        NUM_CMDS
};

#define DSR_POS 0x1
#define CTS_POS 0x2
#define RI_POS 0x4
#define CD_POS 0x8

struct sdio_cmux_ch {
        int lc_id;

        struct mutex lc_lock;
        wait_queue_head_t open_wait_queue;
        int is_remote_open;
        int is_local_open;
        int is_channel_reset;

        char local_status;
        char remote_status;

        struct mutex tx_lock;
        struct list_head tx_list;

        void *priv;
        struct mutex rx_cb_lock;
        void (*receive_cb)(void *, int, void *);
        void (*write_done)(void *, int, void *);
        void (*status_callback)(int, void *);
} logical_ch[SDIO_CMUX_NUM_CHANNELS];

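/*
 * Every transfer on the underlying SDIO_QMI channel is framed with the
 * header below: magic_no carries MAGIC_NO_V1 (0x33FC), cmd is one of
 * enum cmd_type, lc_id selects the logical channel and pkt_len is the
 * length of the payload that follows the header (0 for command packets).
 */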
struct sdio_cmux_hdr {
        uint16_t magic_no;
        uint8_t status;         /* This field is reserved for commands other
                                 * than STATUS */
        uint8_t cmd;
        uint8_t pad_bytes;
        uint8_t lc_id;
        uint16_t pkt_len;
};

struct sdio_cmux_pkt {
        struct sdio_cmux_hdr *hdr;
        void *data;
};

struct sdio_cmux_list_elem {
        struct list_head list;
        struct sdio_cmux_pkt cmux_pkt;
};

#define logical_ch_is_local_open(x) \
        (logical_ch[(x)].is_local_open)

#define logical_ch_is_remote_open(x) \
        (logical_ch[(x)].is_remote_open)

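/*
 * Two single-threaded workqueues drive the mux: sdio_cmux_wq drains the
 * per-channel tx_lists onto the SDIO channel (sdio_cmux_fn), while
 * sdio_cdemux_wq reads from the SDIO channel and demultiplexes incoming
 * packets to the logical channels (sdio_cdemux_fn). bytes_to_write tracks
 * the queued-but-unwritten bytes across all channels, and temp_rx_list
 * parks data that arrives before a receiver is registered.
 */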
static void sdio_cdemux_fn(struct work_struct *work);
static DECLARE_WORK(sdio_cdemux_work, sdio_cdemux_fn);
static struct workqueue_struct *sdio_cdemux_wq;

static DEFINE_MUTEX(write_lock);
static uint32_t bytes_to_write;
static DEFINE_MUTEX(temp_rx_lock);
static LIST_HEAD(temp_rx_list);

static void sdio_cmux_fn(struct work_struct *work);
static DECLARE_WORK(sdio_cmux_work, sdio_cmux_fn);
static struct workqueue_struct *sdio_cmux_wq;

static struct sdio_channel *sdio_qmi_chl;
static uint32_t sdio_cmux_inited;

static uint32_t abort_tx;
static DEFINE_MUTEX(modem_reset_lock);

static DEFINE_MUTEX(probe_lock);

enum {
        MSM_SDIO_CMUX_DEBUG = 1U << 0,
        MSM_SDIO_CMUX_DUMP_BUFFER = 1U << 1,
};

static struct platform_device sdio_ctl_dev = {
        .name = "SDIO_CTL",
        .id = -1,
};

#if defined(DEBUG)
#define D_DUMP_BUFFER(prestr, cnt, buf) \
do { \
        if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DUMP_BUFFER) { \
                int i; \
                pr_debug("%s", prestr); \
                for (i = 0; i < cnt; i++) \
                        pr_info("%.2x", buf[i]); \
                pr_debug("\n"); \
        } \
} while (0)

#define D(x...) \
do { \
        if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DEBUG) \
                pr_debug(x); \
} while (0)

#else
#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
#define D(x...) do {} while (0)
#endif

static int sdio_cmux_ch_alloc(int id)
{
        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
                pr_err("%s: Invalid lc_id - %d\n", __func__, id);
                return -EINVAL;
        }

        logical_ch[id].lc_id = id;
        mutex_init(&logical_ch[id].lc_lock);
        init_waitqueue_head(&logical_ch[id].open_wait_queue);
        logical_ch[id].is_remote_open = 0;
        logical_ch[id].is_local_open = 0;
        logical_ch[id].is_channel_reset = 0;

        INIT_LIST_HEAD(&logical_ch[id].tx_list);
        mutex_init(&logical_ch[id].tx_lock);

        logical_ch[id].priv = NULL;
        mutex_init(&logical_ch[id].rx_cb_lock);
        logical_ch[id].receive_cb = NULL;
        logical_ch[id].write_done = NULL;
        return 0;
}

static int sdio_cmux_ch_clear_and_signal(int id)
{
        struct sdio_cmux_list_elem *list_elem;

        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
                pr_err("%s: Invalid lc_id - %d\n", __func__, id);
                return -EINVAL;
        }

        mutex_lock(&logical_ch[id].lc_lock);
        logical_ch[id].is_remote_open = 0;
        mutex_lock(&logical_ch[id].tx_lock);
        while (!list_empty(&logical_ch[id].tx_list)) {
                list_elem = list_first_entry(&logical_ch[id].tx_list,
                                             struct sdio_cmux_list_elem,
                                             list);
                list_del(&list_elem->list);
                kfree(list_elem->cmux_pkt.hdr);
                kfree(list_elem);
        }
        mutex_unlock(&logical_ch[id].tx_lock);
        mutex_lock(&logical_ch[id].rx_cb_lock);
        if (logical_ch[id].receive_cb)
                logical_ch[id].receive_cb(NULL, 0, logical_ch[id].priv);
        mutex_unlock(&logical_ch[id].rx_cb_lock);
        if (logical_ch[id].write_done)
                logical_ch[id].write_done(NULL, 0, logical_ch[id].priv);
        mutex_unlock(&logical_ch[id].lc_lock);
        wake_up(&logical_ch[id].open_wait_queue);
        return 0;
}

static int sdio_cmux_write_cmd(const int id, enum cmd_type type)
{
        int write_size = 0;
        void *write_data = NULL;
        struct sdio_cmux_list_elem *list_elem;

        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
                pr_err("%s: Invalid lc_id - %d\n", __func__, id);
                return -EINVAL;
        }

        if (type < 0 || type >= NUM_CMDS) {
                pr_err("%s: Invalid cmd - %d\n", __func__, type);
                return -EINVAL;
        }

        write_size = sizeof(struct sdio_cmux_hdr);
        list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
        if (!list_elem) {
                pr_err("%s: list_elem alloc failed\n", __func__);
                return -ENOMEM;
        }

        write_data = kmalloc(write_size, GFP_KERNEL);
        if (!write_data) {
                pr_err("%s: write_data alloc failed\n", __func__);
                kfree(list_elem);
                return -ENOMEM;
        }

        list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
        list_elem->cmux_pkt.data = NULL;

        list_elem->cmux_pkt.hdr->lc_id = (uint8_t)id;
        list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)0;
        list_elem->cmux_pkt.hdr->cmd = (uint8_t)type;
        list_elem->cmux_pkt.hdr->status = (uint8_t)0;
        if (type == STATUS)
                list_elem->cmux_pkt.hdr->status = logical_ch[id].local_status;
        list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
        list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

        mutex_lock(&logical_ch[id].tx_lock);
        list_add_tail(&list_elem->list, &logical_ch[id].tx_list);
        mutex_unlock(&logical_ch[id].tx_lock);

        mutex_lock(&write_lock);
        bytes_to_write += write_size;
        mutex_unlock(&write_lock);
        queue_work(sdio_cmux_wq, &sdio_cmux_work);

        return 0;
}

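/*
 * Open a logical channel: wait up to one second for the remote side's
 * OPEN, register the callbacks, replay any packets that were parked on
 * temp_rx_list for this channel while no receive_cb was registered, and
 * finally queue our own OPEN command for the remote side.
 */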
int sdio_cmux_open(const int id,
                   void (*receive_cb)(void *, int, void *),
                   void (*write_done)(void *, int, void *),
                   void (*status_callback)(int, void *),
                   void *priv)
{
        int r;
        struct sdio_cmux_list_elem *list_elem, *list_elem_tmp;

        if (!sdio_cmux_inited)
                return -ENODEV;
        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
                pr_err("%s: Invalid id - %d\n", __func__, id);
                return -EINVAL;
        }

        r = wait_event_timeout(logical_ch[id].open_wait_queue,
                               logical_ch[id].is_remote_open, (1 * HZ));
        if (r < 0) {
                pr_err("ERROR %s: wait_event_timeout() failed for"
                       " ch%d with rc %d\n", __func__, id, r);
                return r;
        }
        if (r == 0) {
                pr_err("ERROR %s: Wait Timed Out for ch%d\n", __func__, id);
                return -ETIMEDOUT;
        }

        mutex_lock(&logical_ch[id].lc_lock);
        if (!logical_ch[id].is_remote_open) {
                pr_err("%s: Remote ch%d not opened\n", __func__, id);
                mutex_unlock(&logical_ch[id].lc_lock);
                return -EINVAL;
        }
        if (logical_ch[id].is_local_open) {
                mutex_unlock(&logical_ch[id].lc_lock);
                return 0;
        }
        logical_ch[id].is_local_open = 1;
        logical_ch[id].priv = priv;
        logical_ch[id].write_done = write_done;
        logical_ch[id].status_callback = status_callback;
        mutex_lock(&logical_ch[id].rx_cb_lock);
        logical_ch[id].receive_cb = receive_cb;
        if (logical_ch[id].receive_cb) {
                mutex_lock(&temp_rx_lock);
                list_for_each_entry_safe(list_elem, list_elem_tmp,
                                         &temp_rx_list, list) {
                        if ((int)list_elem->cmux_pkt.hdr->lc_id == id) {
                                logical_ch[id].receive_cb(
                                        list_elem->cmux_pkt.data,
                                        (int)list_elem->cmux_pkt.hdr->pkt_len,
                                        logical_ch[id].priv);
                                list_del(&list_elem->list);
                                kfree(list_elem->cmux_pkt.hdr);
                                kfree(list_elem);
                        }
                }
                mutex_unlock(&temp_rx_lock);
        }
        mutex_unlock(&logical_ch[id].rx_cb_lock);
        mutex_unlock(&logical_ch[id].lc_lock);
        sdio_cmux_write_cmd(id, OPEN);
        return 0;
}
EXPORT_SYMBOL(sdio_cmux_open);

int sdio_cmux_close(int id)
{
        struct sdio_cmux_ch *ch;

        if (!sdio_cmux_inited)
                return -ENODEV;
        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
                pr_err("%s: Invalid channel close\n", __func__);
                return -EINVAL;
        }

        ch = &logical_ch[id];
        mutex_lock(&ch->lc_lock);
        mutex_lock(&logical_ch[id].rx_cb_lock);
        ch->receive_cb = NULL;
        mutex_unlock(&logical_ch[id].rx_cb_lock);
        mutex_lock(&ch->tx_lock);
        ch->write_done = NULL;
        mutex_unlock(&ch->tx_lock);
        ch->is_local_open = 0;
        ch->priv = NULL;
        mutex_unlock(&ch->lc_lock);
        sdio_cmux_write_cmd(ch->lc_id, CLOSE);
        return 0;
}
EXPORT_SYMBOL(sdio_cmux_close);

int sdio_cmux_write_avail(int id)
{
        int write_avail;

        mutex_lock(&logical_ch[id].lc_lock);
        if (logical_ch[id].is_channel_reset) {
                mutex_unlock(&logical_ch[id].lc_lock);
                return -ENETRESET;
        }
        mutex_unlock(&logical_ch[id].lc_lock);
        write_avail = sdio_write_avail(sdio_qmi_chl);
        return write_avail - bytes_to_write;
}
EXPORT_SYMBOL(sdio_cmux_write_avail);

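/*
 * Queue one data packet on a logical channel. The payload is copied into
 * a freshly allocated buffer behind a sdio_cmux_hdr, appended to the
 * channel's tx_list and handed to sdio_cmux_wq for transmission.
 * Returns the payload length on success.
 */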
int sdio_cmux_write(int id, void *data, int len)
{
        struct sdio_cmux_list_elem *list_elem;
        uint32_t write_size;
        void *write_data = NULL;
        struct sdio_cmux_ch *ch;
        int ret;

        if (!sdio_cmux_inited)
                return -ENODEV;
        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
                pr_err("%s: Invalid channel id %d\n", __func__, id);
                return -ENODEV;
        }

        ch = &logical_ch[id];
        if (len <= 0) {
                pr_err("%s: Invalid len %d bytes to write\n",
                       __func__, len);
                return -EINVAL;
        }

        write_size = sizeof(struct sdio_cmux_hdr) + len;
        list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
        if (!list_elem) {
                pr_err("%s: list_elem alloc failed\n", __func__);
                return -ENOMEM;
        }

        write_data = kmalloc(write_size, GFP_KERNEL);
        if (!write_data) {
                pr_err("%s: write_data alloc failed\n", __func__);
                kfree(list_elem);
                return -ENOMEM;
        }

        list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
        list_elem->cmux_pkt.data = (void *)((char *)write_data +
                                            sizeof(struct sdio_cmux_hdr));
        memcpy(list_elem->cmux_pkt.data, data, len);

        list_elem->cmux_pkt.hdr->lc_id = (uint8_t)ch->lc_id;
        list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)len;
        list_elem->cmux_pkt.hdr->cmd = (uint8_t)DATA;
        list_elem->cmux_pkt.hdr->status = (uint8_t)0;
        list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
        list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

        mutex_lock(&ch->lc_lock);
        if (!ch->is_remote_open || !ch->is_local_open) {
                pr_err("%s: Local ch%d sending data before sending/receiving"
                       " OPEN command\n", __func__, ch->lc_id);
                if (ch->is_channel_reset)
                        ret = -ENETRESET;
                else
                        ret = -ENODEV;
                mutex_unlock(&ch->lc_lock);
                kfree(write_data);
                kfree(list_elem);
                return ret;
        }
        mutex_lock(&ch->tx_lock);
        list_add_tail(&list_elem->list, &ch->tx_list);
        mutex_unlock(&ch->tx_lock);
        mutex_unlock(&ch->lc_lock);

        mutex_lock(&write_lock);
        bytes_to_write += write_size;
        mutex_unlock(&write_lock);
        queue_work(sdio_cmux_wq, &sdio_cmux_work);

        return len;
}
EXPORT_SYMBOL(sdio_cmux_write);

int is_remote_open(int id)
{
        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
                return -ENODEV;

        return logical_ch_is_remote_open(id);
}
EXPORT_SYMBOL(is_remote_open);

int sdio_cmux_is_channel_reset(int id)
{
        int ret;
        if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
                return -ENODEV;

        mutex_lock(&logical_ch[id].lc_lock);
        ret = logical_ch[id].is_channel_reset;
        mutex_unlock(&logical_ch[id].lc_lock);
        return ret;
}
EXPORT_SYMBOL(sdio_cmux_is_channel_reset);

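/*
 * Modem-control lines are exchanged through STATUS commands. The DSR/CTS
 * bits of remote_status map to TIOCM_DSR/TIOCM_CTS (plus CD and RI),
 * while the same bits in local_status are reported back as
 * TIOCM_DTR/TIOCM_RTS.
 */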
int sdio_cmux_tiocmget(int id)
{
        int ret = (logical_ch[id].remote_status & DSR_POS ? TIOCM_DSR : 0) |
                  (logical_ch[id].remote_status & CTS_POS ? TIOCM_CTS : 0) |
                  (logical_ch[id].remote_status & CD_POS ? TIOCM_CD : 0) |
                  (logical_ch[id].remote_status & RI_POS ? TIOCM_RI : 0) |
                  (logical_ch[id].local_status & CTS_POS ? TIOCM_RTS : 0) |
                  (logical_ch[id].local_status & DSR_POS ? TIOCM_DTR : 0);
        return ret;
}
EXPORT_SYMBOL(sdio_cmux_tiocmget);

int sdio_cmux_tiocmset(int id, unsigned int set, unsigned int clear)
{
        if (set & TIOCM_DTR)
                logical_ch[id].local_status |= DSR_POS;

        if (set & TIOCM_RTS)
                logical_ch[id].local_status |= CTS_POS;

        if (clear & TIOCM_DTR)
                logical_ch[id].local_status &= ~DSR_POS;

        if (clear & TIOCM_RTS)
                logical_ch[id].local_status &= ~CTS_POS;

        sdio_cmux_write_cmd(id, STATUS);
        return 0;
}
EXPORT_SYMBOL(sdio_cmux_tiocmset);

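/*
 * Stash an incoming packet (header included) on temp_rx_list when the
 * target channel has no receive_cb yet; sdio_cmux_open() replays and
 * frees these entries once a callback is registered.
 */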
static int copy_packet(void *pkt, int size)
{
        struct sdio_cmux_list_elem *list_elem = NULL;
        void *temp_pkt = NULL;

        list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
        if (!list_elem) {
                pr_err("%s: list_elem alloc failed\n", __func__);
                return -ENOMEM;
        }
        temp_pkt = kmalloc(size, GFP_KERNEL);
        if (!temp_pkt) {
                pr_err("%s: temp_pkt alloc failed\n", __func__);
                kfree(list_elem);
                return -ENOMEM;
        }

        memcpy(temp_pkt, pkt, size);
        list_elem->cmux_pkt.hdr = temp_pkt;
        list_elem->cmux_pkt.data = (void *)((char *)temp_pkt +
                                            sizeof(struct sdio_cmux_hdr));
        mutex_lock(&temp_rx_lock);
        list_add_tail(&list_elem->list, &temp_rx_list);
        mutex_unlock(&temp_rx_lock);
        return 0;
}

static int process_cmux_pkt(void *pkt, int size)
{
        struct sdio_cmux_hdr *mux_hdr;
        uint32_t id, data_size;
        void *data;
        char *dump_buf = (char *)pkt;

        D_DUMP_BUFFER("process_cmux_pkt:", size, dump_buf);
        mux_hdr = (struct sdio_cmux_hdr *)pkt;
        switch (mux_hdr->cmd) {
        case OPEN:
                id = (uint32_t)(mux_hdr->lc_id);
                D("%s: Received OPEN command for ch%d\n", __func__, id);
                mutex_lock(&logical_ch[id].lc_lock);
                logical_ch[id].is_remote_open = 1;
                if (logical_ch[id].is_channel_reset) {
                        sdio_cmux_write_cmd(id, OPEN);
                        logical_ch[id].is_channel_reset = 0;
                }
                mutex_unlock(&logical_ch[id].lc_lock);
                wake_up(&logical_ch[id].open_wait_queue);
                break;

        case CLOSE:
                id = (uint32_t)(mux_hdr->lc_id);
                D("%s: Received CLOSE command for ch%d\n", __func__, id);
                sdio_cmux_ch_clear_and_signal(id);
                break;

        case DATA:
                id = (uint32_t)(mux_hdr->lc_id);
                D("%s: Received DATA for ch%d\n", __func__, id);
                /*
                 * Drop the packet if the remote side has not opened this
                 * channel yet.
                 */
                mutex_lock(&logical_ch[id].lc_lock);
                if (!logical_ch[id].is_remote_open) {
                        mutex_unlock(&logical_ch[id].lc_lock);
                        pr_err("%s: Remote Ch%d sent data before sending/"
                               "receiving OPEN command\n", __func__, id);
                        return -ENODEV;
                }

                data = (void *)((char *)pkt + sizeof(struct sdio_cmux_hdr));
                data_size = (int)(((struct sdio_cmux_hdr *)pkt)->pkt_len);
                mutex_unlock(&logical_ch[id].lc_lock);
                /*
                 * Release lc_lock before calling receive_cb to avoid a
                 * deadlock where the receive_cb calls a function that tries
                 * to acquire a lock already held by a thread that is itself
                 * waiting on lc_lock.
                 */
                mutex_lock(&logical_ch[id].rx_cb_lock);
                if (logical_ch[id].receive_cb)
                        logical_ch[id].receive_cb(data, data_size,
                                                  logical_ch[id].priv);
                else
                        copy_packet(pkt, size);
                mutex_unlock(&logical_ch[id].rx_cb_lock);
                break;

        case STATUS:
                id = (uint32_t)(mux_hdr->lc_id);
                D("%s: Received STATUS command for ch%d\n", __func__, id);
                if (logical_ch[id].remote_status != mux_hdr->status) {
                        mutex_lock(&logical_ch[id].lc_lock);
                        logical_ch[id].remote_status = mux_hdr->status;
                        mutex_unlock(&logical_ch[id].lc_lock);
                        if (logical_ch[id].status_callback)
                                logical_ch[id].status_callback(
                                                sdio_cmux_tiocmget(id),
                                                logical_ch[id].priv);
                }
                break;
        }
        return 0;
}

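/*
 * A single SDIO read may return several back-to-back cmux packets. Walk
 * the buffer using each header's pkt_len to find the next packet
 * boundary, hand every packet to process_cmux_pkt() and free the buffer
 * when done.
 */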
static void parse_cmux_data(void *data, int size)
{
        int data_parsed = 0, pkt_size;
        char *temp_ptr;

        D("Entered %s\n", __func__);
        temp_ptr = (char *)data;
        while (data_parsed < size) {
                pkt_size = sizeof(struct sdio_cmux_hdr) +
                           (int)(((struct sdio_cmux_hdr *)temp_ptr)->pkt_len);
                D("Parsed %d bytes, Current Pkt Size %d bytes,"
                  " Total size %d bytes\n", data_parsed, pkt_size, size);
                process_cmux_pkt((void *)temp_ptr, pkt_size);
                data_parsed += pkt_size;
                temp_ptr += pkt_size;
        }

        kfree(data);
}

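/*
 * RX worker: runs whenever the SDIO channel signals
 * SDIO_EVENT_DATA_READ_AVAIL. It loops reading whatever is available
 * into a temporary buffer and feeds it to parse_cmux_data(), which takes
 * ownership of the buffer.
 */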
static void sdio_cdemux_fn(struct work_struct *work)
{
        int r = 0, read_avail = 0;
        void *cmux_data;

        while (1) {
                read_avail = sdio_read_avail(sdio_qmi_chl);
                if (read_avail < 0) {
                        pr_err("%s: sdio_read_avail failed with rc %d\n",
                               __func__, read_avail);
                        return;
                }

                if (read_avail == 0) {
                        D("%s: Nothing to read\n", __func__);
                        return;
                }

                D("%s: kmalloc %d bytes\n", __func__, read_avail);
                cmux_data = kmalloc(read_avail, GFP_KERNEL);
                if (!cmux_data) {
                        pr_err("%s: kmalloc Failed\n", __func__);
                        return;
                }

                D("%s: sdio_read %d bytes\n", __func__, read_avail);
                r = sdio_read(sdio_qmi_chl, cmux_data, read_avail);
                if (r < 0) {
                        pr_err("%s: sdio_read failed with rc %d\n",
                               __func__, r);
                        kfree(cmux_data);
                        return;
                }

                parse_cmux_data(cmux_data, read_avail);
        }
        return;
}

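/*
 * TX worker: walks every logical channel and writes out its queued
 * packets. If the SDIO channel lacks space it polls every 250 ms, and
 * failed writes are retried up to MAX_WRITE_RETRY times. A write that
 * fails with -ENODEV marks the modem as gone (abort_tx) so remaining
 * traffic is dropped instead of retried.
 */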
static void sdio_cmux_fn(struct work_struct *work)
{
        int i, r = 0;
        void *write_data;
        uint32_t write_size, write_avail, write_retry = 0;
        int bytes_written;
        struct sdio_cmux_list_elem *list_elem = NULL;
        struct sdio_cmux_ch *ch;

        for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
                ch = &logical_ch[i];
                bytes_written = 0;
                mutex_lock(&ch->tx_lock);
                while (!list_empty(&ch->tx_list)) {
                        list_elem = list_first_entry(&ch->tx_list,
                                                     struct sdio_cmux_list_elem,
                                                     list);
                        list_del(&list_elem->list);
                        mutex_unlock(&ch->tx_lock);

                        write_data = (void *)list_elem->cmux_pkt.hdr;
                        write_size = sizeof(struct sdio_cmux_hdr) +
                                     (uint32_t)list_elem->cmux_pkt.hdr->pkt_len;

                        mutex_lock(&modem_reset_lock);
                        while (!(abort_tx) &&
                               ((write_avail = sdio_write_avail(sdio_qmi_chl))
                                < write_size)) {
                                mutex_unlock(&modem_reset_lock);
                                pr_err("%s: sdio_write_avail %d bytes, "
                                       "write size %d bytes. Waiting...\n",
                                       __func__, write_avail, write_size);
                                msleep(250);
                                mutex_lock(&modem_reset_lock);
                        }
                        while (!(abort_tx) &&
                               ((r = sdio_write(sdio_qmi_chl,
                                                write_data, write_size)) < 0)
                               && (r != -ENODEV)
                               && (write_retry++ < MAX_WRITE_RETRY)) {
                                mutex_unlock(&modem_reset_lock);
                                pr_err("%s: sdio_write failed with rc %d."
                                       "Retrying...", __func__, r);
                                msleep(250);
                                mutex_lock(&modem_reset_lock);
                        }
                        if (!r && !abort_tx) {
                                D("%s: sdio_write_completed %dbytes\n",
                                  __func__, write_size);
                                bytes_written += write_size;
                        } else if (r == -ENODEV) {
                                pr_err("%s: aborting_tx because sdio_write"
                                       " returned %d\n", __func__, r);
                                r = 0;
                                abort_tx = 1;
                        }
                        mutex_unlock(&modem_reset_lock);
                        kfree(list_elem->cmux_pkt.hdr);
                        kfree(list_elem);
                        mutex_lock(&write_lock);
                        bytes_to_write -= write_size;
                        mutex_unlock(&write_lock);
                        mutex_lock(&ch->tx_lock);
                }
                if (ch->write_done)
                        ch->write_done(NULL, bytes_written, ch->priv);
                mutex_unlock(&ch->tx_lock);
        }
        return;
}

static void sdio_qmi_chl_notify(void *priv, unsigned event)
{
        if (event == SDIO_EVENT_DATA_READ_AVAIL) {
                D("%s: Received SDIO_EVENT_DATA_READ_AVAIL\n", __func__);
                queue_work(sdio_cdemux_wq, &sdio_cdemux_work);
        }
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
        int i = 0;
        int j;

        for (j = 0; j < SDIO_CMUX_NUM_CHANNELS; ++j) {
                i += scnprintf(buf + i, max - i,
                               "ch%02d local open=%s remote open=%s\n",
                               j, logical_ch_is_local_open(j) ? "Y" : "N",
                               logical_ch_is_remote_open(j) ? "Y" : "N");
        }

        return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
{
        int (*fill)(char *buf, int max) = file->private_data;
        int bsize = fill(debug_buffer, DEBUG_BUFMAX);
        return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return 0;
}

static const struct file_operations debug_ops = {
        .read = debug_read,
        .open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
                         struct dentry *dent,
                         int (*fill)(char *buf, int max))
{
        debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

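/*
 * Probe runs once at boot and again after a modem restart. On a re-probe
 * (sdio_cmux_inited already set) only the SDIO_QMI channel is reopened
 * and abort_tx is cleared; the first probe also allocates the channels,
 * the workqueues and the SDIO_CTL platform device.
 */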
static int sdio_cmux_probe(struct platform_device *pdev)
{
        int i, r;

        mutex_lock(&probe_lock);
        D("%s Begins\n", __func__);
        if (sdio_cmux_inited) {
                mutex_lock(&modem_reset_lock);
                r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL,
                              sdio_qmi_chl_notify);
                if (r < 0) {
                        mutex_unlock(&modem_reset_lock);
                        pr_err("%s: sdio_open() failed\n", __func__);
                        goto error0;
                }
                abort_tx = 0;
                mutex_unlock(&modem_reset_lock);
                mutex_unlock(&probe_lock);
                return 0;
        }

        for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i)
                sdio_cmux_ch_alloc(i);
        INIT_LIST_HEAD(&temp_rx_list);

        sdio_cmux_wq = create_singlethread_workqueue("sdio_cmux");
        if (!sdio_cmux_wq) {
                pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
                       __func__);
                r = -ENOMEM;
                goto error0;
        }

        sdio_cdemux_wq = create_singlethread_workqueue("sdio_cdemux");
        if (!sdio_cdemux_wq) {
                pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
                       __func__);
                r = -ENOMEM;
                goto error1;
        }

        r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL, sdio_qmi_chl_notify);
        if (r < 0) {
                pr_err("%s: sdio_open() failed\n", __func__);
                goto error2;
        }

        platform_device_register(&sdio_ctl_dev);
        sdio_cmux_inited = 1;
        D("SDIO Control MUX Driver Initialized.\n");
        mutex_unlock(&probe_lock);
        return 0;

error2:
        destroy_workqueue(sdio_cdemux_wq);
error1:
        destroy_workqueue(sdio_cmux_wq);
error0:
        mutex_unlock(&probe_lock);
        return r;
}

static int sdio_cmux_remove(struct platform_device *pdev)
{
        int i;

        mutex_lock(&modem_reset_lock);
        abort_tx = 1;

        for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
                mutex_lock(&logical_ch[i].lc_lock);
                logical_ch[i].is_channel_reset = 1;
                mutex_unlock(&logical_ch[i].lc_lock);
                sdio_cmux_ch_clear_and_signal(i);
        }
        sdio_qmi_chl = NULL;
        mutex_unlock(&modem_reset_lock);

        return 0;
}

static struct platform_driver sdio_cmux_driver = {
        .probe = sdio_cmux_probe,
        .remove = sdio_cmux_remove,
        .driver = {
                .name = "SDIO_QMI",
                .owner = THIS_MODULE,
        },
};

static int __init sdio_cmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
        struct dentry *dent;

        dent = debugfs_create_dir("sdio_cmux", 0);
        if (!IS_ERR(dent))
                debug_create("tbl", 0444, dent, debug_tbl);
#endif

        msm_sdio_cmux_debug_mask = 0;
        return platform_driver_register(&sdio_cmux_driver);
}

module_init(sdio_cmux_init);
MODULE_DESCRIPTION("MSM SDIO Control MUX");
MODULE_LICENSE("GPL v2");