blob: d04a0b03b75fcc295b3df14ae025ee6bb5b5c6b6 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#define DEBUG
14
Steve Mucklef132c6c2012-06-06 18:30:57 -070015#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070016#include <linux/fs.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19#include <linux/sched.h>
20#include <linux/spinlock.h>
21#include <linux/mutex.h>
22#include <linux/uaccess.h>
23#include <linux/workqueue.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26#include <linux/termios.h>
27#include <linux/debugfs.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070028#include <linux/moduleparam.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30#include <mach/sdio_al.h>
31#include <mach/sdio_cmux.h>
32
33#include "modem_notifier.h"
34
35#define MAX_WRITE_RETRY 5
36#define MAGIC_NO_V1 0x33FC
37
38static int msm_sdio_cmux_debug_mask;
39module_param_named(debug_mask, msm_sdio_cmux_debug_mask,
40 int, S_IRUGO | S_IWUSR | S_IWGRP);
41
42enum cmd_type {
43 DATA = 0,
44 OPEN,
45 CLOSE,
46 STATUS,
47 NUM_CMDS
48};
49
50#define DSR_POS 0x1
51#define CTS_POS 0x2
52#define RI_POS 0x4
53#define CD_POS 0x8
54
/*
 * Per-logical-channel state.  One entry per CMUX channel; all channels
 * are multiplexed over the single "SDIO_QMI" SDIO channel.
 */
struct sdio_cmux_ch {
	int lc_id;			/* logical channel id == index in logical_ch[] */

	struct mutex lc_lock;		/* protects open/reset flags and callbacks */
	wait_queue_head_t open_wait_queue; /* woken on remote OPEN / teardown */
	int is_remote_open;		/* remote side has sent OPEN */
	int is_local_open;		/* local client called sdio_cmux_open() */
	int is_channel_reset;		/* set on modem reset (remove) */

	char local_status;		/* our DSR/CTS bits (see *_POS masks) */
	char remote_status;		/* last status byte received from remote */

	struct mutex tx_lock;		/* protects tx_list (nests inside lc_lock) */
	struct list_head tx_list;	/* queued sdio_cmux_list_elem packets */

	void *priv;			/* opaque client context for callbacks */
	void (*receive_cb)(void *, int, void *);	/* (data, len, priv) */
	void (*write_done)(void *, int, void *);	/* (data, len, priv) */
	void (*status_callback)(int, void *);		/* (tiocm bits, priv) */
} logical_ch[SDIO_CMUX_NUM_CHANNELS];
75
/*
 * On-the-wire CMUX packet header; for DATA packets it is immediately
 * followed by pkt_len bytes of payload.
 */
struct sdio_cmux_hdr {
	uint16_t magic_no;	/* MAGIC_NO_V1 */
	uint8_t status;		/* This field is reserved for commands other
				 * than STATUS */
	uint8_t cmd;		/* enum cmd_type */
	uint8_t pad_bytes;
	uint8_t lc_id;		/* destination logical channel */
	uint16_t pkt_len;	/* payload length, excluding this header */
};

/* Convenience view of a packet: header plus pointer to its payload. */
struct sdio_cmux_pkt {
	struct sdio_cmux_hdr *hdr;
	void *data;
};

/* List node used for both the per-channel tx queues and temp_rx_list. */
struct sdio_cmux_list_elem {
	struct list_head list;
	struct sdio_cmux_pkt cmux_pkt;
};
95
96#define logical_ch_is_local_open(x) \
97 (logical_ch[(x)].is_local_open)
98
99#define logical_ch_is_remote_open(x) \
100 (logical_ch[(x)].is_remote_open)
101
102static void sdio_cdemux_fn(struct work_struct *work);
103static DECLARE_WORK(sdio_cdemux_work, sdio_cdemux_fn);
104static struct workqueue_struct *sdio_cdemux_wq;
105
106static DEFINE_MUTEX(write_lock);
107static uint32_t bytes_to_write;
108static DEFINE_MUTEX(temp_rx_lock);
109static LIST_HEAD(temp_rx_list);
110
111static void sdio_cmux_fn(struct work_struct *work);
112static DECLARE_WORK(sdio_cmux_work, sdio_cmux_fn);
113static struct workqueue_struct *sdio_cmux_wq;
114
115static struct sdio_channel *sdio_qmi_chl;
116static uint32_t sdio_cmux_inited;
117
118static uint32_t abort_tx;
119static DEFINE_MUTEX(modem_reset_lock);
120
Jeff Hugoa1f008b2011-07-13 14:33:59 -0600121static DEFINE_MUTEX(probe_lock);
122
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700123enum {
124 MSM_SDIO_CMUX_DEBUG = 1U << 0,
125 MSM_SDIO_CMUX_DUMP_BUFFER = 1U << 1,
126};
127
128static struct platform_device sdio_ctl_dev = {
129 .name = "SDIO_CTL",
130 .id = -1,
131};
132
133#if defined(DEBUG)
134#define D_DUMP_BUFFER(prestr, cnt, buf) \
135do { \
136 if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DUMP_BUFFER) { \
137 int i; \
138 pr_debug("%s", prestr); \
139 for (i = 0; i < cnt; i++) \
140 pr_info("%.2x", buf[i]); \
141 pr_debug("\n"); \
142 } \
143} while (0)
144
145#define D(x...) \
146do { \
147 if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DEBUG) \
148 pr_debug(x); \
149} while (0)
150
151#else
152#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
153#define D(x...) do {} while (0)
154#endif
155
156static int sdio_cmux_ch_alloc(int id)
157{
158 if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
159 pr_err("%s: Invalid lc_id - %d\n", __func__, id);
160 return -EINVAL;
161 }
162
163 logical_ch[id].lc_id = id;
164 mutex_init(&logical_ch[id].lc_lock);
165 init_waitqueue_head(&logical_ch[id].open_wait_queue);
166 logical_ch[id].is_remote_open = 0;
167 logical_ch[id].is_local_open = 0;
168 logical_ch[id].is_channel_reset = 0;
169
170 INIT_LIST_HEAD(&logical_ch[id].tx_list);
171 mutex_init(&logical_ch[id].tx_lock);
172
173 logical_ch[id].priv = NULL;
174 logical_ch[id].receive_cb = NULL;
175 logical_ch[id].write_done = NULL;
176 return 0;
177}
178
/*
 * Tear down channel @id after a remote CLOSE or a modem reset: mark
 * the remote side closed, free every queued tx packet, and signal the
 * client (a NULL buffer / zero length in receive_cb/write_done means
 * "channel aborted").  Any waiter in sdio_cmux_open() is woken last.
 * Returns 0 on success or -EINVAL for an out-of-range id.
 */
static int sdio_cmux_ch_clear_and_signal(int id)
{
	struct sdio_cmux_list_elem *list_elem;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	mutex_lock(&logical_ch[id].lc_lock);
	logical_ch[id].is_remote_open = 0;
	/* tx_lock nests inside lc_lock, same order as sdio_cmux_write(). */
	mutex_lock(&logical_ch[id].tx_lock);
	while (!list_empty(&logical_ch[id].tx_list)) {
		list_elem = list_first_entry(&logical_ch[id].tx_list,
					     struct sdio_cmux_list_elem,
					     list);
		list_del(&list_elem->list);
		kfree(list_elem->cmux_pkt.hdr);
		kfree(list_elem);
	}
	mutex_unlock(&logical_ch[id].tx_lock);
	/* NULL data tells the client the channel is gone. */
	if (logical_ch[id].receive_cb)
		logical_ch[id].receive_cb(NULL, 0, logical_ch[id].priv);
	if (logical_ch[id].write_done)
		logical_ch[id].write_done(NULL, 0, logical_ch[id].priv);
	mutex_unlock(&logical_ch[id].lc_lock);
	wake_up(&logical_ch[id].open_wait_queue);
	return 0;
}
208
209static int sdio_cmux_write_cmd(const int id, enum cmd_type type)
210{
211 int write_size = 0;
212 void *write_data = NULL;
213 struct sdio_cmux_list_elem *list_elem;
214
215 if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
216 pr_err("%s: Invalid lc_id - %d\n", __func__, id);
217 return -EINVAL;
218 }
219
220 if (type < 0 || type > NUM_CMDS) {
221 pr_err("%s: Invalid cmd - %d\n", __func__, type);
222 return -EINVAL;
223 }
224
225 write_size = sizeof(struct sdio_cmux_hdr);
226 list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
227 if (!list_elem) {
228 pr_err("%s: list_elem alloc failed\n", __func__);
229 return -ENOMEM;
230 }
231
232 write_data = kmalloc(write_size, GFP_KERNEL);
233 if (!write_data) {
234 pr_err("%s: write_data alloc failed\n", __func__);
235 kfree(list_elem);
236 return -ENOMEM;
237 }
238
239 list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
240 list_elem->cmux_pkt.data = NULL;
241
242 list_elem->cmux_pkt.hdr->lc_id = (uint8_t)id;
243 list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)0;
244 list_elem->cmux_pkt.hdr->cmd = (uint8_t)type;
245 list_elem->cmux_pkt.hdr->status = (uint8_t)0;
246 if (type == STATUS)
247 list_elem->cmux_pkt.hdr->status = logical_ch[id].local_status;
248 list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
249 list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;
250
251 mutex_lock(&logical_ch[id].tx_lock);
252 list_add_tail(&list_elem->list, &logical_ch[id].tx_list);
253 mutex_unlock(&logical_ch[id].tx_lock);
254
255 mutex_lock(&write_lock);
256 bytes_to_write += write_size;
257 mutex_unlock(&write_lock);
258 queue_work(sdio_cmux_wq, &sdio_cmux_work);
259
260 return 0;
261}
262
/*
 * Open the local side of channel @id and register client callbacks.
 *
 * Waits up to one second for the remote side's OPEN to arrive.  If a
 * receive_cb is supplied, packets for this channel that arrived before
 * the open (parked on temp_rx_list) are replayed to it first.  Finally
 * queues our own OPEN command to the remote.
 *
 * @receive_cb:      called with (data, len, priv) for each rx packet;
 *                   NULL data signals channel teardown.
 * @write_done:      called when queued tx data has been written out.
 * @status_callback: called with the new TIOCM bits on a remote STATUS.
 *
 * Returns 0 on success (including already-open), -ENODEV if the mux
 * is not initialized, -EINVAL for a bad id or a remote that is not
 * open, -ETIMEDOUT if the remote OPEN never arrived.
 */
int sdio_cmux_open(const int id,
		   void (*receive_cb)(void *, int, void *),
		   void (*write_done)(void *, int, void *),
		   void (*status_callback)(int, void *),
		   void *priv)
{
	int r;
	struct sdio_cmux_list_elem *list_elem, *list_elem_tmp;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid id - %d\n", __func__, id);
		return -EINVAL;
	}

	r = wait_event_timeout(logical_ch[id].open_wait_queue,
			       logical_ch[id].is_remote_open, (1 * HZ));
	if (r < 0) {
		pr_err("ERROR %s: wait_event_timeout() failed for"
		       " ch%d with rc %d\n", __func__, id, r);
		return r;
	}
	if (r == 0) {
		pr_err("ERROR %s: Wait Timed Out for ch%d\n", __func__, id);
		return -ETIMEDOUT;
	}

	mutex_lock(&logical_ch[id].lc_lock);
	/* Re-check under the lock: the remote may have closed again
	 * between the wakeup and here. */
	if (!logical_ch[id].is_remote_open) {
		pr_err("%s: Remote ch%d not opened\n", __func__, id);
		mutex_unlock(&logical_ch[id].lc_lock);
		return -EINVAL;
	}
	/* Already locally open: keep the existing callbacks, succeed. */
	if (logical_ch[id].is_local_open) {
		mutex_unlock(&logical_ch[id].lc_lock);
		return 0;
	}
	logical_ch[id].is_local_open = 1;
	logical_ch[id].priv = priv;
	logical_ch[id].receive_cb = receive_cb;
	logical_ch[id].write_done = write_done;
	logical_ch[id].status_callback = status_callback;
	if (logical_ch[id].receive_cb) {
		/* Replay packets that arrived before we had a receiver. */
		mutex_lock(&temp_rx_lock);
		list_for_each_entry_safe(list_elem, list_elem_tmp,
					 &temp_rx_list, list) {
			if ((int)list_elem->cmux_pkt.hdr->lc_id == id) {
				logical_ch[id].receive_cb(
					list_elem->cmux_pkt.data,
					(int)list_elem->cmux_pkt.hdr->pkt_len,
					logical_ch[id].priv);
				list_del(&list_elem->list);
				kfree(list_elem->cmux_pkt.hdr);
				kfree(list_elem);
			}
		}
		mutex_unlock(&temp_rx_lock);
	}
	mutex_unlock(&logical_ch[id].lc_lock);
	sdio_cmux_write_cmd(id, OPEN);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_open);
327
/*
 * Close the local side of channel @id: deregister the callbacks, mark
 * the channel locally closed, and queue a CLOSE command to the remote.
 * The remote-open flag is left untouched.
 * Returns 0 on success, -ENODEV if the mux is not initialized, or
 * -EINVAL for an out-of-range id.
 */
int sdio_cmux_close(int id)
{
	struct sdio_cmux_ch *ch;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid channel close\n", __func__);
		return -EINVAL;
	}

	ch = &logical_ch[id];
	mutex_lock(&ch->lc_lock);
	ch->receive_cb = NULL;
	/* write_done is read by the tx worker under tx_lock, so clear
	 * it under the same lock. */
	mutex_lock(&ch->tx_lock);
	ch->write_done = NULL;
	mutex_unlock(&ch->tx_lock);
	ch->is_local_open = 0;
	ch->priv = NULL;
	mutex_unlock(&ch->lc_lock);
	sdio_cmux_write_cmd(ch->lc_id, CLOSE);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_close);
352
353int sdio_cmux_write_avail(int id)
354{
355 int write_avail;
356
357 mutex_lock(&logical_ch[id].lc_lock);
358 if (logical_ch[id].is_channel_reset) {
359 mutex_unlock(&logical_ch[id].lc_lock);
360 return -ENETRESET;
361 }
362 mutex_unlock(&logical_ch[id].lc_lock);
363 write_avail = sdio_write_avail(sdio_qmi_chl);
364 return write_avail - bytes_to_write;
365}
366EXPORT_SYMBOL(sdio_cmux_write_avail);
367
/*
 * Queue @len bytes from @data for transmission on channel @id.
 *
 * The payload is copied into a freshly allocated packet (wire header
 * plus data), appended to the channel's tx list, and the tx worker is
 * scheduled; the caller keeps ownership of @data.  write_done() fires
 * later, once the worker has pushed the bytes out over SDIO.
 *
 * Returns @len on success, -EINVAL for a non-positive length,
 * -ENOMEM on allocation failure, -ENETRESET if the channel was reset,
 * or -ENODEV for a bad id / a channel that is not fully open.
 */
int sdio_cmux_write(int id, void *data, int len)
{
	struct sdio_cmux_list_elem *list_elem;
	uint32_t write_size;
	void *write_data = NULL;
	struct sdio_cmux_ch *ch;
	int ret;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid channel id %d\n", __func__, id);
		return -ENODEV;
	}

	ch = &logical_ch[id];
	if (len <= 0) {
		pr_err("%s: Invalid len %d bytes to write\n",
		       __func__, len);
		return -EINVAL;
	}

	write_size = sizeof(struct sdio_cmux_hdr) + len;
	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}

	/* One allocation holds the wire header followed by the payload. */
	write_data = kmalloc(write_size, GFP_KERNEL);
	if (!write_data) {
		pr_err("%s: write_data alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
	list_elem->cmux_pkt.data = (void *)((char *)write_data +
					    sizeof(struct sdio_cmux_hdr));
	memcpy(list_elem->cmux_pkt.data, data, len);

	list_elem->cmux_pkt.hdr->lc_id = (uint8_t)ch->lc_id;
	list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)len;
	list_elem->cmux_pkt.hdr->cmd = (uint8_t)DATA;
	list_elem->cmux_pkt.hdr->status = (uint8_t)0;
	list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
	list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

	mutex_lock(&ch->lc_lock);
	/* Both ends must have exchanged OPEN before data may flow. */
	if (!ch->is_remote_open || !ch->is_local_open) {
		pr_err("%s: Local ch%d sending data before sending/receiving"
		       " OPEN command\n", __func__, ch->lc_id);
		if (ch->is_channel_reset)
			ret = -ENETRESET;
		else
			ret = -ENODEV;
		mutex_unlock(&ch->lc_lock);
		kfree(write_data);
		kfree(list_elem);
		return ret;
	}
	/* tx_lock nests inside lc_lock throughout this driver. */
	mutex_lock(&ch->tx_lock);
	list_add_tail(&list_elem->list, &ch->tx_list);
	mutex_unlock(&ch->tx_lock);
	mutex_unlock(&ch->lc_lock);

	/* Account the pending bytes and kick the tx worker. */
	mutex_lock(&write_lock);
	bytes_to_write += write_size;
	mutex_unlock(&write_lock);
	queue_work(sdio_cmux_wq, &sdio_cmux_work);

	return len;
}
EXPORT_SYMBOL(sdio_cmux_write);
442
443int is_remote_open(int id)
444{
445 if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
446 return -ENODEV;
447
448 return logical_ch_is_remote_open(id);
449}
450EXPORT_SYMBOL(is_remote_open);
451
452int sdio_cmux_is_channel_reset(int id)
453{
454 int ret;
455 if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
456 return -ENODEV;
457
458 mutex_lock(&logical_ch[id].lc_lock);
459 ret = logical_ch[id].is_channel_reset;
460 mutex_unlock(&logical_ch[id].lc_lock);
461 return ret;
462}
463EXPORT_SYMBOL(sdio_cmux_is_channel_reset);
464
465int sdio_cmux_tiocmget(int id)
466{
467 int ret = (logical_ch[id].remote_status & DSR_POS ? TIOCM_DSR : 0) |
468 (logical_ch[id].remote_status & CTS_POS ? TIOCM_CTS : 0) |
469 (logical_ch[id].remote_status & CD_POS ? TIOCM_CD : 0) |
470 (logical_ch[id].remote_status & RI_POS ? TIOCM_RI : 0) |
471 (logical_ch[id].local_status & CTS_POS ? TIOCM_RTS : 0) |
472 (logical_ch[id].local_status & DSR_POS ? TIOCM_DTR : 0);
473 return ret;
474}
475EXPORT_SYMBOL(sdio_cmux_tiocmget);
476
477int sdio_cmux_tiocmset(int id, unsigned int set, unsigned int clear)
478{
479 if (set & TIOCM_DTR)
480 logical_ch[id].local_status |= DSR_POS;
481
482 if (set & TIOCM_RTS)
483 logical_ch[id].local_status |= CTS_POS;
484
485 if (clear & TIOCM_DTR)
486 logical_ch[id].local_status &= ~DSR_POS;
487
488 if (clear & TIOCM_RTS)
489 logical_ch[id].local_status &= ~CTS_POS;
490
491 sdio_cmux_write_cmd(id, STATUS);
492 return 0;
493}
494EXPORT_SYMBOL(sdio_cmux_tiocmset);
495
496static int copy_packet(void *pkt, int size)
497{
498 struct sdio_cmux_list_elem *list_elem = NULL;
499 void *temp_pkt = NULL;
500
501 list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
502 if (!list_elem) {
503 pr_err("%s: list_elem alloc failed\n", __func__);
504 return -ENOMEM;
505 }
506 temp_pkt = kmalloc(size, GFP_KERNEL);
507 if (!temp_pkt) {
508 pr_err("%s: temp_pkt alloc failed\n", __func__);
509 kfree(list_elem);
510 return -ENOMEM;
511 }
512
513 memcpy(temp_pkt, pkt, size);
514 list_elem->cmux_pkt.hdr = temp_pkt;
515 list_elem->cmux_pkt.data = (void *)((char *)temp_pkt +
516 sizeof(struct sdio_cmux_hdr));
517 mutex_lock(&temp_rx_lock);
518 list_add_tail(&list_elem->list, &temp_rx_list);
519 mutex_unlock(&temp_rx_lock);
520 return 0;
521}
522
523static int process_cmux_pkt(void *pkt, int size)
524{
525 struct sdio_cmux_hdr *mux_hdr;
526 uint32_t id, data_size;
527 void *data;
528 char *dump_buf = (char *)pkt;
529
530 D_DUMP_BUFFER("process_cmux_pkt:", size, dump_buf);
531 mux_hdr = (struct sdio_cmux_hdr *)pkt;
532 switch (mux_hdr->cmd) {
533 case OPEN:
534 id = (uint32_t)(mux_hdr->lc_id);
535 D("%s: Received OPEN command for ch%d\n", __func__, id);
536 mutex_lock(&logical_ch[id].lc_lock);
537 logical_ch[id].is_remote_open = 1;
Karthikeyan Ramasubramanian550be3d2011-09-12 16:55:36 -0600538 if (logical_ch[id].is_channel_reset) {
539 sdio_cmux_write_cmd(id, OPEN);
540 logical_ch[id].is_channel_reset = 0;
541 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700542 mutex_unlock(&logical_ch[id].lc_lock);
543 wake_up(&logical_ch[id].open_wait_queue);
544 break;
545
546 case CLOSE:
547 id = (uint32_t)(mux_hdr->lc_id);
548 D("%s: Received CLOSE command for ch%d\n", __func__, id);
549 sdio_cmux_ch_clear_and_signal(id);
550 break;
551
552 case DATA:
553 id = (uint32_t)(mux_hdr->lc_id);
554 D("%s: Received DATA for ch%d\n", __func__, id);
555 /*Channel is not locally open & if single packet received
556 then drop it*/
557 mutex_lock(&logical_ch[id].lc_lock);
558 if (!logical_ch[id].is_remote_open) {
559 mutex_unlock(&logical_ch[id].lc_lock);
560 pr_err("%s: Remote Ch%d sent data before sending/"
561 "receiving OPEN command\n", __func__, id);
562 return -ENODEV;
563 }
564
565 data = (void *)((char *)pkt + sizeof(struct sdio_cmux_hdr));
566 data_size = (int)(((struct sdio_cmux_hdr *)pkt)->pkt_len);
567 if (logical_ch[id].receive_cb)
568 logical_ch[id].receive_cb(data, data_size,
569 logical_ch[id].priv);
570 else
571 copy_packet(pkt, size);
572 mutex_unlock(&logical_ch[id].lc_lock);
573 break;
574
575 case STATUS:
576 id = (uint32_t)(mux_hdr->lc_id);
577 D("%s: Received STATUS command for ch%d\n", __func__, id);
578 if (logical_ch[id].remote_status != mux_hdr->status) {
579 mutex_lock(&logical_ch[id].lc_lock);
580 logical_ch[id].remote_status = mux_hdr->status;
581 mutex_unlock(&logical_ch[id].lc_lock);
582 if (logical_ch[id].status_callback)
583 logical_ch[id].status_callback(
584 sdio_cmux_tiocmget(id),
585 logical_ch[id].priv);
586 }
587 break;
588 }
589 return 0;
590}
591
592static void parse_cmux_data(void *data, int size)
593{
594 int data_parsed = 0, pkt_size;
595 char *temp_ptr;
596
597 D("Entered %s\n", __func__);
598 temp_ptr = (char *)data;
599 while (data_parsed < size) {
600 pkt_size = sizeof(struct sdio_cmux_hdr) +
601 (int)(((struct sdio_cmux_hdr *)temp_ptr)->pkt_len);
602 D("Parsed %d bytes, Current Pkt Size %d bytes,"
603 " Total size %d bytes\n", data_parsed, pkt_size, size);
604 process_cmux_pkt((void *)temp_ptr, pkt_size);
605 data_parsed += pkt_size;
606 temp_ptr += pkt_size;
607 }
608
609 kfree(data);
610}
611
/*
 * Demux worker: drain the SDIO_QMI channel.  Each pass allocates a
 * buffer of exactly sdio_read_avail() bytes, reads into it, and hands
 * it to parse_cmux_data(), which takes ownership of (and frees) the
 * buffer.  Loops until nothing is left to read or an error occurs.
 */
static void sdio_cdemux_fn(struct work_struct *work)
{
	int r = 0, read_avail = 0;
	void *cmux_data;

	while (1) {
		read_avail = sdio_read_avail(sdio_qmi_chl);
		if (read_avail < 0) {
			pr_err("%s: sdio_read_avail failed with rc %d\n",
			       __func__, read_avail);
			return;
		}

		if (read_avail == 0) {
			D("%s: Nothing to read\n", __func__);
			return;
		}

		D("%s: kmalloc %d bytes\n", __func__, read_avail);
		cmux_data = kmalloc(read_avail, GFP_KERNEL);
		if (!cmux_data) {
			pr_err("%s: kmalloc Failed\n", __func__);
			return;
		}

		D("%s: sdio_read %d bytes\n", __func__, read_avail);
		r = sdio_read(sdio_qmi_chl, cmux_data, read_avail);
		if (r < 0) {
			pr_err("%s: sdio_read failed with rc %d\n",
			       __func__, r);
			kfree(cmux_data);
			return;
		}

		/* parse_cmux_data() frees cmux_data. */
		parse_cmux_data(cmux_data, read_avail);
	}
	return;
}
650
/*
 * Tx worker: walk every logical channel and flush its tx_list to the
 * SDIO_QMI channel.
 *
 * For each packet it polls sdio_write_avail() (sleeping 250 ms) until
 * there is room, then retries sdio_write() on failure.  A -ENODEV
 * from sdio_write() means the modem went away: tx is aborted globally
 * until the next probe re-arms it.  write_done() is invoked once per
 * channel with the total bytes written in this run.
 *
 * NOTE(review): write_retry is never reset between packets, so the
 * MAX_WRITE_RETRY budget is shared across the whole run — confirm
 * this is intended.
 */
static void sdio_cmux_fn(struct work_struct *work)
{
	int i, r = 0;
	void *write_data;
	uint32_t write_size, write_avail, write_retry = 0;
	int bytes_written;
	struct sdio_cmux_list_elem *list_elem = NULL;
	struct sdio_cmux_ch *ch;

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
		ch = &logical_ch[i];
		bytes_written = 0;
		mutex_lock(&ch->tx_lock);
		while (!list_empty(&ch->tx_list)) {
			list_elem = list_first_entry(&ch->tx_list,
						     struct sdio_cmux_list_elem,
						     list);
			list_del(&list_elem->list);
			/* Drop tx_lock while doing the sleeping SDIO I/O. */
			mutex_unlock(&ch->tx_lock);

			write_data = (void *)list_elem->cmux_pkt.hdr;
			write_size = sizeof(struct sdio_cmux_hdr) +
				(uint32_t)list_elem->cmux_pkt.hdr->pkt_len;

			mutex_lock(&modem_reset_lock);
			/* Wait for enough SDIO space, unless tx aborted. */
			while (!(abort_tx) &&
			       ((write_avail = sdio_write_avail(sdio_qmi_chl))
				< write_size)) {
				mutex_unlock(&modem_reset_lock);
				pr_err("%s: sdio_write_avail %d bytes, "
				       "write size %d bytes. Waiting...\n",
				       __func__, write_avail, write_size);
				msleep(250);
				mutex_lock(&modem_reset_lock);
			}
			/* Retry transient failures; -ENODEV is terminal. */
			while (!(abort_tx) &&
			       ((r = sdio_write(sdio_qmi_chl,
						write_data, write_size)) < 0)
			       && (r != -ENODEV)
			       && (write_retry++ < MAX_WRITE_RETRY)) {
				mutex_unlock(&modem_reset_lock);
				pr_err("%s: sdio_write failed with rc %d."
				       "Retrying...", __func__, r);
				msleep(250);
				mutex_lock(&modem_reset_lock);
			}
			if (!r && !abort_tx) {
				D("%s: sdio_write_completed %dbytes\n",
				  __func__, write_size);
				bytes_written += write_size;
			} else if (r == -ENODEV) {
				/* Modem gone: stop transmitting until
				 * probe() reopens the channel. */
				pr_err("%s: aborting_tx because sdio_write"
				       " returned %d\n", __func__, r);
				r = 0;
				abort_tx = 1;
			}
			mutex_unlock(&modem_reset_lock);
			kfree(list_elem->cmux_pkt.hdr);
			kfree(list_elem);
			mutex_lock(&write_lock);
			bytes_to_write -= write_size;
			mutex_unlock(&write_lock);
			mutex_lock(&ch->tx_lock);
		}
		if (ch->write_done)
			ch->write_done(NULL, bytes_written, ch->priv);
		mutex_unlock(&ch->tx_lock);
	}
	return;
}
721
722static void sdio_qmi_chl_notify(void *priv, unsigned event)
723{
724 if (event == SDIO_EVENT_DATA_READ_AVAIL) {
725 D("%s: Received SDIO_EVENT_DATA_READ_AVAIL\n", __func__);
726 queue_work(sdio_cdemux_wq, &sdio_cdemux_work);
727 }
728}
729
730#ifdef CONFIG_DEBUG_FS
731
732static int debug_tbl(char *buf, int max)
733{
734 int i = 0;
735 int j;
736
737 for (j = 0; j < SDIO_CMUX_NUM_CHANNELS; ++j) {
738 i += scnprintf(buf + i, max - i,
739 "ch%02d local open=%s remote open=%s\n",
740 j, logical_ch_is_local_open(j) ? "Y" : "N",
741 logical_ch_is_remote_open(j) ? "Y" : "N");
742 }
743
744 return i;
745}
746
747#define DEBUG_BUFMAX 4096
748static char debug_buffer[DEBUG_BUFMAX];
749
/*
 * debugfs read: regenerate the table into the shared static buffer
 * via the fill function stashed at open time, then copy it out.
 * NOTE(review): debug_buffer is not locked against concurrent
 * readers — confirm this is acceptable for a debug-only interface.
 */
static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}
757
/*
 * debugfs open: stash the fill callback (installed as the inode's
 * private data by debug_create()) for debug_read() to use.
 */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
763
764
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

/*
 * Create a debugfs file whose reads are produced by @fill; the fill
 * function is carried to debug_read() via the inode's private data.
 */
static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
776
777#endif
778
779static int sdio_cmux_probe(struct platform_device *pdev)
780{
781 int i, r;
782
Jeff Hugoa1f008b2011-07-13 14:33:59 -0600783 mutex_lock(&probe_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700784 D("%s Begins\n", __func__);
785 if (sdio_cmux_inited) {
786 mutex_lock(&modem_reset_lock);
787 r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL,
788 sdio_qmi_chl_notify);
789 if (r < 0) {
790 mutex_unlock(&modem_reset_lock);
791 pr_err("%s: sdio_open() failed\n", __func__);
Jeff Hugo1efef082011-07-13 13:02:38 -0600792 goto error0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700793 }
794 abort_tx = 0;
795 mutex_unlock(&modem_reset_lock);
Jeff Hugoa1f008b2011-07-13 14:33:59 -0600796 mutex_unlock(&probe_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700797 return 0;
798 }
799
800 for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i)
801 sdio_cmux_ch_alloc(i);
802 INIT_LIST_HEAD(&temp_rx_list);
803
804 sdio_cmux_wq = create_singlethread_workqueue("sdio_cmux");
805 if (IS_ERR(sdio_cmux_wq)) {
806 pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
807 __func__);
808 r = -ENOMEM;
809 goto error0;
810 }
811
812 sdio_cdemux_wq = create_singlethread_workqueue("sdio_cdemux");
813 if (IS_ERR(sdio_cdemux_wq)) {
814 pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
815 __func__);
816 r = -ENOMEM;
817 goto error1;
818 }
819
820 r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL, sdio_qmi_chl_notify);
821 if (r < 0) {
822 pr_err("%s: sdio_open() failed\n", __func__);
823 goto error2;
824 }
825
826 platform_device_register(&sdio_ctl_dev);
827 sdio_cmux_inited = 1;
828 D("SDIO Control MUX Driver Initialized.\n");
Jeff Hugoa1f008b2011-07-13 14:33:59 -0600829 mutex_unlock(&probe_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700830 return 0;
831
832error2:
833 destroy_workqueue(sdio_cdemux_wq);
834error1:
835 destroy_workqueue(sdio_cmux_wq);
836error0:
Jeff Hugoa1f008b2011-07-13 14:33:59 -0600837 mutex_unlock(&probe_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838 return r;
839}
840
/*
 * Platform remove: runs when the modem resets/goes away.  Abort all
 * pending tx, mark every channel reset, clear and signal each channel
 * so clients observe the teardown, and drop the SDIO channel handle.
 * NOTE(review): sdio_qmi_chl is NULLed but never closed here —
 * presumably the SDIO core reclaims the channel on modem reset;
 * confirm against the sdio_al API.
 */
static int sdio_cmux_remove(struct platform_device *pdev)
{
	int i;

	mutex_lock(&modem_reset_lock);
	abort_tx = 1;

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
		mutex_lock(&logical_ch[i].lc_lock);
		logical_ch[i].is_channel_reset = 1;
		mutex_unlock(&logical_ch[i].lc_lock);
		sdio_cmux_ch_clear_and_signal(i);
	}
	sdio_qmi_chl = NULL;
	mutex_unlock(&modem_reset_lock);

	return 0;
}
859
860static struct platform_driver sdio_cmux_driver = {
861 .probe = sdio_cmux_probe,
862 .remove = sdio_cmux_remove,
863 .driver = {
864 .name = "SDIO_QMI",
865 .owner = THIS_MODULE,
866 },
867};
868
/*
 * Module init: create the debugfs "tbl" entry (best effort) and
 * register the platform driver; real setup happens in probe when the
 * "SDIO_QMI" device appears.
 * NOTE(review): debugfs_create_dir() can also return NULL on some
 * kernels, which passes the !IS_ERR() check — harmless here, but
 * worth confirming.
 */
static int __init sdio_cmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_cmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif

	msm_sdio_cmux_debug_mask = 0;
	return platform_driver_register(&sdio_cmux_driver);
}
882
883module_init(sdio_cmux_init);
884MODULE_DESCRIPTION("MSM SDIO Control MUX");
885MODULE_LICENSE("GPL v2");