/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define DEBUG

#include <linux/fs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_cmux.h>

#include "modem_notifier.h"

#define MAX_WRITE_RETRY 5
#define MAGIC_NO_V1 0x33FC

static int msm_sdio_cmux_debug_mask;
module_param_named(debug_mask, msm_sdio_cmux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

enum cmd_type {
	DATA = 0,
	OPEN,
	CLOSE,
	STATUS,
	NUM_CMDS
};

#define DSR_POS 0x1
#define CTS_POS 0x2
#define RI_POS 0x4
#define CD_POS 0x8

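/*
 * Per-logical-channel state.  All logical channels are multiplexed over a
 * single SDIO channel ("SDIO_QMI").
 */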
struct sdio_cmux_ch {
	int lc_id;

	struct mutex lc_lock;
	wait_queue_head_t open_wait_queue;
	int is_remote_open;
	int is_local_open;
	int is_channel_reset;

	char local_status;
	char remote_status;

	struct mutex tx_lock;
	struct list_head tx_list;

	void *priv;
	void (*receive_cb)(void *, int, void *);
	void (*write_done)(void *, int, void *);
	void (*status_callback)(int, void *);
} logical_ch[SDIO_CMUX_NUM_CHANNELS];

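/* Header prepended to every packet carried over the SDIO channel. */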
struct sdio_cmux_hdr {
	uint16_t magic_no;
	uint8_t status;		/* This field is reserved for commands other
				 * than STATUS */
	uint8_t cmd;
	uint8_t pad_bytes;
	uint8_t lc_id;
	uint16_t pkt_len;
};

struct sdio_cmux_pkt {
	struct sdio_cmux_hdr *hdr;
	void *data;
};

struct sdio_cmux_list_elem {
	struct list_head list;
	struct sdio_cmux_pkt cmux_pkt;
};

#define logical_ch_is_local_open(x) \
	(logical_ch[(x)].is_local_open)

#define logical_ch_is_remote_open(x) \
	(logical_ch[(x)].is_remote_open)

static void sdio_cdemux_fn(struct work_struct *work);
static DECLARE_WORK(sdio_cdemux_work, sdio_cdemux_fn);
static struct workqueue_struct *sdio_cdemux_wq;

static DEFINE_MUTEX(write_lock);
static uint32_t bytes_to_write;
static DEFINE_MUTEX(temp_rx_lock);
static LIST_HEAD(temp_rx_list);

static void sdio_cmux_fn(struct work_struct *work);
static DECLARE_WORK(sdio_cmux_work, sdio_cmux_fn);
static struct workqueue_struct *sdio_cmux_wq;

static struct sdio_channel *sdio_qmi_chl;
static uint32_t sdio_cmux_inited;

static uint32_t abort_tx;
static DEFINE_MUTEX(modem_reset_lock);

static DEFINE_MUTEX(probe_lock);

enum {
	MSM_SDIO_CMUX_DEBUG = 1U << 0,
	MSM_SDIO_CMUX_DUMP_BUFFER = 1U << 1,
};

static struct platform_device sdio_ctl_dev = {
	.name = "SDIO_CTL",
	.id = -1,
};

#if defined(DEBUG)
#define D_DUMP_BUFFER(prestr, cnt, buf) \
do { \
	if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DUMP_BUFFER) { \
		int i; \
		pr_debug("%s", prestr); \
		for (i = 0; i < cnt; i++) \
			pr_info("%.2x", buf[i]); \
		pr_debug("\n"); \
	} \
} while (0)

#define D(x...) \
do { \
	if (msm_sdio_cmux_debug_mask & MSM_SDIO_CMUX_DEBUG) \
		pr_debug(x); \
} while (0)

#else
#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
#define D(x...) do {} while (0)
#endif

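/* Initialize locks, wait queue, flags and the TX list for channel @id. */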
static int sdio_cmux_ch_alloc(int id)
{
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	logical_ch[id].lc_id = id;
	mutex_init(&logical_ch[id].lc_lock);
	init_waitqueue_head(&logical_ch[id].open_wait_queue);
	logical_ch[id].is_remote_open = 0;
	logical_ch[id].is_local_open = 0;
	logical_ch[id].is_channel_reset = 0;

	INIT_LIST_HEAD(&logical_ch[id].tx_list);
	mutex_init(&logical_ch[id].tx_lock);

	logical_ch[id].priv = NULL;
	logical_ch[id].receive_cb = NULL;
	logical_ch[id].write_done = NULL;
	return 0;
}

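/*
 * Mark channel @id closed by the remote side, free any queued TX packets,
 * notify the client through its callbacks and wake up waiters blocked in
 * sdio_cmux_open().
 */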
static int sdio_cmux_ch_clear_and_signal(int id)
{
	struct sdio_cmux_list_elem *list_elem;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	mutex_lock(&logical_ch[id].lc_lock);
	logical_ch[id].is_remote_open = 0;
	mutex_lock(&logical_ch[id].tx_lock);
	while (!list_empty(&logical_ch[id].tx_list)) {
		list_elem = list_first_entry(&logical_ch[id].tx_list,
					     struct sdio_cmux_list_elem,
					     list);
		list_del(&list_elem->list);
		kfree(list_elem->cmux_pkt.hdr);
		kfree(list_elem);
	}
	mutex_unlock(&logical_ch[id].tx_lock);
	if (logical_ch[id].receive_cb)
		logical_ch[id].receive_cb(NULL, 0, logical_ch[id].priv);
	if (logical_ch[id].write_done)
		logical_ch[id].write_done(NULL, 0, logical_ch[id].priv);
	mutex_unlock(&logical_ch[id].lc_lock);
	wake_up(&logical_ch[id].open_wait_queue);
	return 0;
}

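/*
 * Queue a header-only control packet (OPEN, CLOSE or STATUS) for channel @id
 * on its TX list and schedule the mux worker to push it over SDIO.
 */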
static int sdio_cmux_write_cmd(const int id, enum cmd_type type)
{
	int write_size = 0;
	void *write_data = NULL;
	struct sdio_cmux_list_elem *list_elem;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid lc_id - %d\n", __func__, id);
		return -EINVAL;
	}

	if (type < 0 || type >= NUM_CMDS) {
		pr_err("%s: Invalid cmd - %d\n", __func__, type);
		return -EINVAL;
	}

	write_size = sizeof(struct sdio_cmux_hdr);
	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}

	write_data = kmalloc(write_size, GFP_KERNEL);
	if (!write_data) {
		pr_err("%s: write_data alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
	list_elem->cmux_pkt.data = NULL;

	list_elem->cmux_pkt.hdr->lc_id = (uint8_t)id;
	list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)0;
	list_elem->cmux_pkt.hdr->cmd = (uint8_t)type;
	list_elem->cmux_pkt.hdr->status = (uint8_t)0;
	if (type == STATUS)
		list_elem->cmux_pkt.hdr->status = logical_ch[id].local_status;
	list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
	list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

	mutex_lock(&logical_ch[id].tx_lock);
	list_add_tail(&list_elem->list, &logical_ch[id].tx_list);
	mutex_unlock(&logical_ch[id].tx_lock);

	mutex_lock(&write_lock);
	bytes_to_write += write_size;
	mutex_unlock(&write_lock);
	queue_work(sdio_cmux_wq, &sdio_cmux_work);

	return 0;
}

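/*
 * sdio_cmux_open() - open logical channel @id and register client callbacks.
 *
 * Waits up to one second for the remote side to announce the channel, records
 * the callbacks, delivers any data already buffered in temp_rx_list for this
 * channel and then sends an OPEN command back to the remote processor.
 *
 * Minimal usage sketch (client names below are illustrative only, not part of
 * this driver):
 *
 *	static void my_recv(void *data, int sz, void *priv) { ... }
 *
 *	rc = sdio_cmux_open(id, my_recv, NULL, NULL, my_priv);
 *	if (!rc)
 *		sdio_cmux_write(id, buf, len);
 */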
int sdio_cmux_open(const int id,
		   void (*receive_cb)(void *, int, void *),
		   void (*write_done)(void *, int, void *),
		   void (*status_callback)(int, void *),
		   void *priv)
{
	int r;
	struct sdio_cmux_list_elem *list_elem, *list_elem_tmp;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid id - %d\n", __func__, id);
		return -EINVAL;
	}

	r = wait_event_timeout(logical_ch[id].open_wait_queue,
			       logical_ch[id].is_remote_open, (1 * HZ));
	if (r < 0) {
		pr_err("ERROR %s: wait_event_timeout() failed for"
		       " ch%d with rc %d\n", __func__, id, r);
		return r;
	}
	if (r == 0) {
		pr_err("ERROR %s: Wait Timed Out for ch%d\n", __func__, id);
		return -ETIMEDOUT;
	}

	mutex_lock(&logical_ch[id].lc_lock);
	if (!logical_ch[id].is_remote_open) {
		pr_err("%s: Remote ch%d not opened\n", __func__, id);
		mutex_unlock(&logical_ch[id].lc_lock);
		return -EINVAL;
	}
	if (logical_ch[id].is_local_open) {
		mutex_unlock(&logical_ch[id].lc_lock);
		return 0;
	}
	logical_ch[id].is_local_open = 1;
	logical_ch[id].priv = priv;
	logical_ch[id].receive_cb = receive_cb;
	logical_ch[id].write_done = write_done;
	logical_ch[id].status_callback = status_callback;
	if (logical_ch[id].receive_cb) {
		mutex_lock(&temp_rx_lock);
		list_for_each_entry_safe(list_elem, list_elem_tmp,
					 &temp_rx_list, list) {
			if ((int)list_elem->cmux_pkt.hdr->lc_id == id) {
				logical_ch[id].receive_cb(
					list_elem->cmux_pkt.data,
					(int)list_elem->cmux_pkt.hdr->pkt_len,
					logical_ch[id].priv);
				list_del(&list_elem->list);
				kfree(list_elem->cmux_pkt.hdr);
				kfree(list_elem);
			}
		}
		mutex_unlock(&temp_rx_lock);
	}
	mutex_unlock(&logical_ch[id].lc_lock);
	sdio_cmux_write_cmd(id, OPEN);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_open);

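/*
 * Close logical channel @id locally, drop its callbacks and send a CLOSE
 * command to the remote side.
 */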
int sdio_cmux_close(int id)
{
	struct sdio_cmux_ch *ch;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid channel close\n", __func__);
		return -EINVAL;
	}

	ch = &logical_ch[id];
	mutex_lock(&ch->lc_lock);
	ch->is_local_open = 0;
	ch->priv = NULL;
	ch->receive_cb = NULL;
	ch->write_done = NULL;
	mutex_unlock(&ch->lc_lock);
	sdio_cmux_write_cmd(ch->lc_id, CLOSE);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_close);

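/*
 * Return the number of bytes that can currently be queued on channel @id:
 * the SDIO write space minus bytes already pending, or -ENETRESET if the
 * channel has been reset.
 */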
int sdio_cmux_write_avail(int id)
{
	int write_avail;

	mutex_lock(&logical_ch[id].lc_lock);
	if (logical_ch[id].is_channel_reset) {
		mutex_unlock(&logical_ch[id].lc_lock);
		return -ENETRESET;
	}
	mutex_unlock(&logical_ch[id].lc_lock);
	write_avail = sdio_write_avail(sdio_qmi_chl);
	return write_avail - bytes_to_write;
}
EXPORT_SYMBOL(sdio_cmux_write_avail);

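/*
 * Queue @len bytes of @data for transmission on channel @id.  A CMUX header
 * is prepended, the packet is appended to the channel's TX list and the mux
 * worker is scheduled; returns @len on success.
 */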
int sdio_cmux_write(int id, void *data, int len)
{
	struct sdio_cmux_list_elem *list_elem;
	uint32_t write_size;
	void *write_data = NULL;
	struct sdio_cmux_ch *ch;
	int ret;

	if (!sdio_cmux_inited)
		return -ENODEV;
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS) {
		pr_err("%s: Invalid channel id %d\n", __func__, id);
		return -ENODEV;
	}

	ch = &logical_ch[id];
	if (len <= 0) {
		pr_err("%s: Invalid len %d bytes to write\n",
		       __func__, len);
		return -EINVAL;
	}

	write_size = sizeof(struct sdio_cmux_hdr) + len;
	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}

	write_data = kmalloc(write_size, GFP_KERNEL);
	if (!write_data) {
		pr_err("%s: write_data alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	list_elem->cmux_pkt.hdr = (struct sdio_cmux_hdr *)write_data;
	list_elem->cmux_pkt.data = (void *)((char *)write_data +
					    sizeof(struct sdio_cmux_hdr));
	memcpy(list_elem->cmux_pkt.data, data, len);

	list_elem->cmux_pkt.hdr->lc_id = (uint8_t)ch->lc_id;
	list_elem->cmux_pkt.hdr->pkt_len = (uint16_t)len;
	list_elem->cmux_pkt.hdr->cmd = (uint8_t)DATA;
	list_elem->cmux_pkt.hdr->status = (uint8_t)0;
	list_elem->cmux_pkt.hdr->pad_bytes = (uint8_t)0;
	list_elem->cmux_pkt.hdr->magic_no = (uint16_t)MAGIC_NO_V1;

	mutex_lock(&ch->lc_lock);
	if (!ch->is_remote_open || !ch->is_local_open) {
		pr_err("%s: Local ch%d sending data before sending/receiving"
		       " OPEN command\n", __func__, ch->lc_id);
		if (ch->is_channel_reset)
			ret = -ENETRESET;
		else
			ret = -ENODEV;
		mutex_unlock(&ch->lc_lock);
		kfree(write_data);
		kfree(list_elem);
		return ret;
	}
	mutex_lock(&ch->tx_lock);
	list_add_tail(&list_elem->list, &ch->tx_list);
	mutex_unlock(&ch->tx_lock);
	mutex_unlock(&ch->lc_lock);

	mutex_lock(&write_lock);
	bytes_to_write += write_size;
	mutex_unlock(&write_lock);
	queue_work(sdio_cmux_wq, &sdio_cmux_work);

	return len;
}
EXPORT_SYMBOL(sdio_cmux_write);

int is_remote_open(int id)
{
	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
		return -ENODEV;

	return logical_ch_is_remote_open(id);
}
EXPORT_SYMBOL(is_remote_open);

int sdio_cmux_is_channel_reset(int id)
{
	int ret;

	if (id < 0 || id >= SDIO_CMUX_NUM_CHANNELS)
		return -ENODEV;

	mutex_lock(&logical_ch[id].lc_lock);
	ret = logical_ch[id].is_channel_reset;
	mutex_unlock(&logical_ch[id].lc_lock);
	return ret;
}
EXPORT_SYMBOL(sdio_cmux_is_channel_reset);

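/* Report channel @id's modem-control signals as TIOCM_* bits. */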
int sdio_cmux_tiocmget(int id)
{
	int ret = (logical_ch[id].remote_status & DSR_POS ? TIOCM_DSR : 0) |
		  (logical_ch[id].remote_status & CTS_POS ? TIOCM_CTS : 0) |
		  (logical_ch[id].remote_status & CD_POS ? TIOCM_CD : 0) |
		  (logical_ch[id].remote_status & RI_POS ? TIOCM_RI : 0) |
		  (logical_ch[id].local_status & CTS_POS ? TIOCM_RTS : 0) |
		  (logical_ch[id].local_status & DSR_POS ? TIOCM_DTR : 0);
	return ret;
}
EXPORT_SYMBOL(sdio_cmux_tiocmget);

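/*
 * Set/clear the local DTR and RTS signals for channel @id and send the
 * updated state to the remote side via a STATUS command.
 */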
int sdio_cmux_tiocmset(int id, unsigned int set, unsigned int clear)
{
	if (set & TIOCM_DTR)
		logical_ch[id].local_status |= DSR_POS;

	if (set & TIOCM_RTS)
		logical_ch[id].local_status |= CTS_POS;

	if (clear & TIOCM_DTR)
		logical_ch[id].local_status &= ~DSR_POS;

	if (clear & TIOCM_RTS)
		logical_ch[id].local_status &= ~CTS_POS;

	sdio_cmux_write_cmd(id, STATUS);
	return 0;
}
EXPORT_SYMBOL(sdio_cmux_tiocmset);

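/*
 * Copy a received packet into temp_rx_list so it can be delivered later,
 * once the client has opened the channel and registered a receive callback.
 */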
static int copy_packet(void *pkt, int size)
{
	struct sdio_cmux_list_elem *list_elem = NULL;
	void *temp_pkt = NULL;

	list_elem = kmalloc(sizeof(struct sdio_cmux_list_elem), GFP_KERNEL);
	if (!list_elem) {
		pr_err("%s: list_elem alloc failed\n", __func__);
		return -ENOMEM;
	}
	temp_pkt = kmalloc(size, GFP_KERNEL);
	if (!temp_pkt) {
		pr_err("%s: temp_pkt alloc failed\n", __func__);
		kfree(list_elem);
		return -ENOMEM;
	}

	memcpy(temp_pkt, pkt, size);
	list_elem->cmux_pkt.hdr = temp_pkt;
	list_elem->cmux_pkt.data = (void *)((char *)temp_pkt +
					    sizeof(struct sdio_cmux_hdr));
	mutex_lock(&temp_rx_lock);
	list_add_tail(&list_elem->list, &temp_rx_list);
	mutex_unlock(&temp_rx_lock);
	return 0;
}

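/*
 * Handle one demuxed packet according to its command type: OPEN and CLOSE
 * update channel state, DATA is delivered to the client (or buffered), and
 * STATUS updates the remote modem-control bits.
 */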
static int process_cmux_pkt(void *pkt, int size)
{
	struct sdio_cmux_hdr *mux_hdr;
	uint32_t id, data_size;
	void *data;
	char *dump_buf = (char *)pkt;

	D_DUMP_BUFFER("process_cmux_pkt:", size, dump_buf);
	mux_hdr = (struct sdio_cmux_hdr *)pkt;
	switch (mux_hdr->cmd) {
	case OPEN:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received OPEN command for ch%d\n", __func__, id);
		mutex_lock(&logical_ch[id].lc_lock);
		logical_ch[id].is_remote_open = 1;
		if (logical_ch[id].is_channel_reset) {
			sdio_cmux_write_cmd(id, OPEN);
			logical_ch[id].is_channel_reset = 0;
		}
		mutex_unlock(&logical_ch[id].lc_lock);
		wake_up(&logical_ch[id].open_wait_queue);
		break;

	case CLOSE:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received CLOSE command for ch%d\n", __func__, id);
		sdio_cmux_ch_clear_and_signal(id);
		break;

	case DATA:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received DATA for ch%d\n", __func__, id);
		/*
		 * Reject data if the remote side has not opened the channel;
		 * if no receive callback is registered yet, the packet is
		 * buffered in temp_rx_list (see copy_packet()) instead.
		 */
		mutex_lock(&logical_ch[id].lc_lock);
		if (!logical_ch[id].is_remote_open) {
			mutex_unlock(&logical_ch[id].lc_lock);
			pr_err("%s: Remote Ch%d sent data before sending/"
			       "receiving OPEN command\n", __func__, id);
			return -ENODEV;
		}

		data = (void *)((char *)pkt + sizeof(struct sdio_cmux_hdr));
		data_size = (int)(((struct sdio_cmux_hdr *)pkt)->pkt_len);
		if (logical_ch[id].receive_cb)
			logical_ch[id].receive_cb(data, data_size,
						  logical_ch[id].priv);
		else
			copy_packet(pkt, size);
		mutex_unlock(&logical_ch[id].lc_lock);
		break;

	case STATUS:
		id = (uint32_t)(mux_hdr->lc_id);
		D("%s: Received STATUS command for ch%d\n", __func__, id);
		if (logical_ch[id].remote_status != mux_hdr->status) {
			mutex_lock(&logical_ch[id].lc_lock);
			logical_ch[id].remote_status = mux_hdr->status;
			mutex_unlock(&logical_ch[id].lc_lock);
			if (logical_ch[id].status_callback)
				logical_ch[id].status_callback(
						sdio_cmux_tiocmget(id),
						logical_ch[id].priv);
		}
		break;
	}
	return 0;
}

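/*
 * Split a raw buffer read from the SDIO channel into individual CMUX packets
 * (using pkt_len from each header) and hand each one to process_cmux_pkt().
 * Frees the buffer when done.
 */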
static void parse_cmux_data(void *data, int size)
{
	int data_parsed = 0, pkt_size;
	char *temp_ptr;

	D("Entered %s\n", __func__);
	temp_ptr = (char *)data;
	while (data_parsed < size) {
		pkt_size = sizeof(struct sdio_cmux_hdr) +
			   (int)(((struct sdio_cmux_hdr *)temp_ptr)->pkt_len);
		D("Parsed %d bytes, Current Pkt Size %d bytes,"
		  " Total size %d bytes\n", data_parsed, pkt_size, size);
		process_cmux_pkt((void *)temp_ptr, pkt_size);
		data_parsed += pkt_size;
		temp_ptr += pkt_size;
	}

	kfree(data);
}

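/*
 * Demux work function: drain the SDIO channel by repeatedly reading whatever
 * is available and parsing it into CMUX packets, until nothing is left.
 */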
static void sdio_cdemux_fn(struct work_struct *work)
{
	int r = 0, read_avail = 0;
	void *cmux_data;

	while (1) {
		read_avail = sdio_read_avail(sdio_qmi_chl);
		if (read_avail < 0) {
			pr_err("%s: sdio_read_avail failed with rc %d\n",
			       __func__, read_avail);
			return;
		}

		if (read_avail == 0) {
			D("%s: Nothing to read\n", __func__);
			return;
		}

		D("%s: kmalloc %d bytes\n", __func__, read_avail);
		cmux_data = kmalloc(read_avail, GFP_KERNEL);
		if (!cmux_data) {
			pr_err("%s: kmalloc Failed\n", __func__);
			return;
		}

		D("%s: sdio_read %d bytes\n", __func__, read_avail);
		r = sdio_read(sdio_qmi_chl, cmux_data, read_avail);
		if (r < 0) {
			pr_err("%s: sdio_read failed with rc %d\n",
			       __func__, r);
			kfree(cmux_data);
			return;
		}

		parse_cmux_data(cmux_data, read_avail);
	}
	return;
}

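/*
 * Mux work function: for each logical channel, pop queued packets off the TX
 * list and write them to the SDIO channel, waiting and retrying when write
 * space is short.  A -ENODEV from sdio_write() marks the modem as gone and
 * aborts further TX until the next probe.
 */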
static void sdio_cmux_fn(struct work_struct *work)
{
	int i, r = 0;
	void *write_data;
	uint32_t write_size, write_avail, write_retry = 0;
	int bytes_written;
	struct sdio_cmux_list_elem *list_elem = NULL;
	struct sdio_cmux_ch *ch;

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
		ch = &logical_ch[i];
		bytes_written = 0;
		mutex_lock(&ch->tx_lock);
		while (!list_empty(&ch->tx_list)) {
			list_elem = list_first_entry(&ch->tx_list,
						     struct sdio_cmux_list_elem,
						     list);
			list_del(&list_elem->list);
			mutex_unlock(&ch->tx_lock);

			write_data = (void *)list_elem->cmux_pkt.hdr;
			write_size = sizeof(struct sdio_cmux_hdr) +
				     (uint32_t)list_elem->cmux_pkt.hdr->pkt_len;

			mutex_lock(&modem_reset_lock);
			while (!(abort_tx) &&
			       ((write_avail = sdio_write_avail(sdio_qmi_chl))
				< write_size)) {
				mutex_unlock(&modem_reset_lock);
				pr_err("%s: sdio_write_avail %d bytes, "
				       "write size %d bytes. Waiting...\n",
				       __func__, write_avail, write_size);
				msleep(250);
				mutex_lock(&modem_reset_lock);
			}
			while (!(abort_tx) &&
			       ((r = sdio_write(sdio_qmi_chl,
						write_data, write_size)) < 0)
			       && (r != -ENODEV)
			       && (write_retry++ < MAX_WRITE_RETRY)) {
				mutex_unlock(&modem_reset_lock);
				pr_err("%s: sdio_write failed with rc %d."
				       "Retrying...", __func__, r);
				msleep(250);
				mutex_lock(&modem_reset_lock);
			}
			if (!r && !abort_tx) {
				D("%s: sdio_write_completed %dbytes\n",
				  __func__, write_size);
				bytes_written += write_size;
			} else if (r == -ENODEV) {
				pr_err("%s: aborting_tx because sdio_write"
				       " returned %d\n", __func__, r);
				r = 0;
				abort_tx = 1;
			}
			mutex_unlock(&modem_reset_lock);
			kfree(list_elem->cmux_pkt.hdr);
			kfree(list_elem);
			mutex_lock(&write_lock);
			bytes_to_write -= write_size;
			mutex_unlock(&write_lock);
			mutex_lock(&ch->tx_lock);
		}
		if (ch->write_done)
			ch->write_done(NULL, bytes_written, ch->priv);
		mutex_unlock(&ch->tx_lock);
	}
	return;
}

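/* SDIO channel event callback: kick the demux worker when data arrives. */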
static void sdio_qmi_chl_notify(void *priv, unsigned event)
{
	if (event == SDIO_EVENT_DATA_READ_AVAIL) {
		D("%s: Received SDIO_EVENT_DATA_READ_AVAIL\n", __func__);
		queue_work(sdio_cdemux_wq, &sdio_cdemux_work);
	}
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_CMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, logical_ch_is_local_open(j) ? "Y" : "N",
			       logical_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

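/*
 * Platform probe for the "SDIO_QMI" device.  On the first probe this sets up
 * the logical channels, the workqueues and the SDIO channel and registers the
 * SDIO_CTL platform device; on subsequent probes (after a modem restart) it
 * only reopens the SDIO channel and re-enables TX.
 */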
static int sdio_cmux_probe(struct platform_device *pdev)
{
	int i, r;

	mutex_lock(&probe_lock);
	D("%s Begins\n", __func__);
	if (sdio_cmux_inited) {
		mutex_lock(&modem_reset_lock);
		r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL,
			      sdio_qmi_chl_notify);
		if (r < 0) {
			mutex_unlock(&modem_reset_lock);
			pr_err("%s: sdio_open() failed\n", __func__);
			goto error0;
		}
		abort_tx = 0;
		mutex_unlock(&modem_reset_lock);
		mutex_unlock(&probe_lock);
		return 0;
	}

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i)
		sdio_cmux_ch_alloc(i);
	INIT_LIST_HEAD(&temp_rx_list);

	sdio_cmux_wq = create_singlethread_workqueue("sdio_cmux");
	/* create_singlethread_workqueue() returns NULL on failure */
	if (!sdio_cmux_wq) {
		pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
		       __func__);
		r = -ENOMEM;
		goto error0;
	}

	sdio_cdemux_wq = create_singlethread_workqueue("sdio_cdemux");
	if (!sdio_cdemux_wq) {
		pr_err("%s: create_singlethread_workqueue() ENOMEM\n",
		       __func__);
		r = -ENOMEM;
		goto error1;
	}

	r = sdio_open("SDIO_QMI", &sdio_qmi_chl, NULL, sdio_qmi_chl_notify);
	if (r < 0) {
		pr_err("%s: sdio_open() failed\n", __func__);
		goto error2;
	}

	platform_device_register(&sdio_ctl_dev);
	sdio_cmux_inited = 1;
	D("SDIO Control MUX Driver Initialized.\n");
	mutex_unlock(&probe_lock);
	return 0;

error2:
	destroy_workqueue(sdio_cdemux_wq);
error1:
	destroy_workqueue(sdio_cmux_wq);
error0:
	mutex_unlock(&probe_lock);
	return r;
}

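/*
 * Called when the SDIO_QMI platform device goes away (modem reset): abort
 * all pending TX, mark every channel as reset and signal clients/waiters.
 */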
static int sdio_cmux_remove(struct platform_device *pdev)
{
	int i;

	mutex_lock(&modem_reset_lock);
	abort_tx = 1;

	for (i = 0; i < SDIO_CMUX_NUM_CHANNELS; ++i) {
		mutex_lock(&logical_ch[i].lc_lock);
		logical_ch[i].is_channel_reset = 1;
		mutex_unlock(&logical_ch[i].lc_lock);
		sdio_cmux_ch_clear_and_signal(i);
	}
	sdio_qmi_chl = NULL;
	mutex_unlock(&modem_reset_lock);

	return 0;
}

static struct platform_driver sdio_cmux_driver = {
	.probe		= sdio_cmux_probe,
	.remove		= sdio_cmux_remove,
	.driver		= {
		.name	= "SDIO_QMI",
		.owner	= THIS_MODULE,
	},
};

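/* Module init: set up debugfs and register the platform driver. */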
static int __init sdio_cmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_cmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif

	msm_sdio_cmux_debug_mask = 0;
	return platform_driver_register(&sdio_cmux_driver);
}

module_init(sdio_cmux_init);
MODULE_DESCRIPTION("MSM SDIO Control MUX");
MODULE_LICENSE("GPL v2");