/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * SDIO DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_dmux.h>

#define SDIO_CH_LOCAL_OPEN	0x1
#define SDIO_CH_REMOTE_OPEN	0x2
#define SDIO_CH_IN_RESET	0x4

#define SDIO_MUX_HDR_MAGIC_NO	0x33fc

#define SDIO_MUX_HDR_CMD_DATA	0
#define SDIO_MUX_HDR_CMD_OPEN	1
#define SDIO_MUX_HDR_CMD_CLOSE	2

#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4
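
/*
 * Simple per-channel TX flow control: once a client opts in via the
 * msm_sdio_dmux_is_ch_full()/msm_sdio_dmux_is_ch_low() queries below,
 * writes return -EAGAIN while num_tx_pkts is at or above HIGH_WATERMARK,
 * and the channel is reported "low" once it drains back to LOW_WATERMARK
 * or below.
 */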

static int msm_sdio_dmux_debug_enable;
module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t sdio_dmux_read_cnt;
static uint32_t sdio_dmux_write_cnt;
static uint32_t sdio_dmux_write_cpy_cnt;
static uint32_t sdio_dmux_write_cpy_bytes;

#define DBG(x...) do { \
		if (msm_sdio_dmux_debug_enable) \
			pr_debug(x); \
	} while (0)

#define DBG_INC_READ_CNT(x) do { \
		sdio_dmux_read_cnt += (x); \
		if (msm_sdio_dmux_debug_enable) \
			pr_debug("%s: total read bytes %u\n", \
				 __func__, sdio_dmux_read_cnt); \
	} while (0)

#define DBG_INC_WRITE_CNT(x) do { \
		sdio_dmux_write_cnt += (x); \
		if (msm_sdio_dmux_debug_enable) \
			pr_debug("%s: total written bytes %u\n", \
				 __func__, sdio_dmux_write_cnt); \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do { \
		sdio_dmux_write_cpy_bytes += (x); \
		sdio_dmux_write_cpy_cnt++; \
		if (msm_sdio_dmux_debug_enable) \
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, sdio_dmux_write_cpy_cnt, \
				 sdio_dmux_write_cpy_bytes); \
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

struct sdio_ch_info {
	uint32_t status;
	void (*receive_cb)(void *, struct sk_buff *);
	void (*write_done)(void *, struct sk_buff *);
	void *priv;
	spinlock_t lock;
	int num_tx_pkts;
	int use_wm;
};

static struct sk_buff_head sdio_mux_write_pool;
static spinlock_t sdio_mux_write_lock;

static struct sdio_channel *sdio_mux_ch;
static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
struct wake_lock sdio_mux_ch_wakelock;
static int sdio_mux_initialized;
static int fatal_error;

struct sdio_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};
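
/*
 * On the wire, every frame carried over the SDIO channel starts with the
 * sdio_mux_hdr above, followed by pkt_len bytes of payload and pad_len
 * bytes of padding; data frames are padded to a 4-byte multiple before
 * being queued for transmit.
 */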

struct sdio_partial_pkt_info {
	uint32_t valid;
	struct sk_buff *skb;
	struct sdio_mux_hdr *hdr;
};
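
/*
 * A mux frame that is split across two sdio_read() buffers is tracked
 * here: the trailing fragment is saved (as a clone of the read skb) by
 * sdio_mux_save_partial_pkt() and prepended to the next read buffer in
 * handle_sdio_partial_pkt().
 */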

static void sdio_mux_read_data(struct work_struct *work);
static void sdio_mux_write_data(struct work_struct *work);
static void sdio_mux_send_open_cmd(uint32_t id);

static DEFINE_MUTEX(sdio_mux_lock);
static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static struct workqueue_struct *sdio_mux_workqueue;
static struct sdio_partial_pkt_info sdio_partial_pkt;

#define sdio_ch_is_open(x) \
	(sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))

#define sdio_ch_is_local_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)

#define sdio_ch_is_remote_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)

#define sdio_ch_is_in_reset(x) \
	(sdio_ch[(x)].status & SDIO_CH_IN_RESET)

static inline void skb_set_data(struct sk_buff *skb,
				unsigned char *data,
				unsigned int len)
{
	/* panic if tail > end */
	skb->data = data;
	skb->tail = skb->data + len;
	skb->len = len;
	skb->truesize = len + sizeof(struct sk_buff);
}

static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
				      struct sk_buff *skb_mux)
{
	struct sk_buff *skb;

	/* I think we can avoid cloning here */
	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		return;
	}

	/* protect? */
	skb_set_data(skb, (unsigned char *)hdr,
		     skb->tail - (unsigned char *)hdr);
	sdio_partial_pkt.skb = skb;
	sdio_partial_pkt.valid = 1;
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb->head, skb->data, skb->tail, skb->end, skb->len);
	return;
}

static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("%s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}

static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
				     struct sk_buff *skb_mux)
{
	void *rp;
	unsigned long flags;
	int send_open = 0;

	DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
	switch (hdr->cmd) {
	case SDIO_MUX_HDR_CMD_DATA:
		rp = handle_sdio_mux_data(hdr, skb_mux);
		break;
	case SDIO_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;
		sdio_ch[hdr->ch_id].num_tx_pkts = 0;

		if (sdio_ch_is_in_reset(hdr->ch_id)) {
			DBG("%s: in reset - sending open cmd\n", __func__);
			sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
			send_open = 1;
		}

		/* notify client so it can update its status */
		if (sdio_ch[hdr->ch_id].receive_cb)
			sdio_ch[hdr->ch_id].receive_cb(
					sdio_ch[hdr->ch_id].priv, NULL);

		if (sdio_ch[hdr->ch_id].write_done)
			sdio_ch[hdr->ch_id].write_done(
					sdio_ch[hdr->ch_id].priv, NULL);
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		if (send_open)
			sdio_mux_send_open_cmd(hdr->ch_id);

		break;
	case SDIO_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		break;
	default:
		rp = hdr + 1;
	}

	return rp;
}

static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
{
	struct sk_buff *p_skb;
	struct sdio_mux_hdr *p_hdr;
	void *ptr, *rp = skb_mux->data;

	/* protect? */
	if (sdio_partial_pkt.valid) {
		p_skb = sdio_partial_pkt.skb;

		ptr = skb_push(skb_mux, p_skb->len);
		memcpy(ptr, p_skb->data, p_skb->len);
		sdio_partial_pkt.skb = NULL;
		sdio_partial_pkt.valid = 0;
		dev_kfree_skb_any(p_skb);

		DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		    skb_mux->head, skb_mux->data, skb_mux->tail,
		    skb_mux->end, skb_mux->len);

		p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
		rp = handle_sdio_mux_command(p_hdr, skb_mux);
	}
	return rp;
}

static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails, attempt to get a smaller chunk of memory */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate once more to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	mutex_lock(&sdio_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("%s: write returned %d\n", __func__, rc);
		if (rc == 0)
			DBG_INC_WRITE_CNT(skb->len);
	} else
		rc = -ENOMEM;

	mutex_unlock(&sdio_mux_lock);
	return rc;
}

static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
		mutex_lock(&sdio_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("%s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("%s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_mux_lock);
	return 0;
}

static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
						skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the
			 * skb and reschedule for the
			 * other skbs
			 */
			pr_err("%s: sdio_mux_write error %d"
				" for ch %d, skb=%p\n",
				__func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}

int msm_sdio_is_channel_in_reset(uint32_t id)
{
	int rc = 0;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	if (sdio_ch_is_in_reset(id))
		rc = 1;

	return rc;
}

int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have enough tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}

int msm_sdio_dmux_open(uint32_t id, void *priv,
		       void (*receive_cb)(void *, struct sk_buff *),
		       void (*write_done)(void *, struct sk_buff *))
{
	unsigned long flags;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].num_tx_pkts = 0;
	sdio_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	sdio_mux_send_open_cmd(id);

open_done:
	pr_info("%s: opened ch %d\n", __func__, id);
	return 0;
}
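
/*
 * Minimal client usage sketch (illustrative only; my_priv, my_rx_cb and
 * my_tx_done are hypothetical client code, not part of this driver):
 *
 *	rc = msm_sdio_dmux_open(0, my_priv, my_rx_cb, my_tx_done);
 *	if (!rc)
 *		rc = msm_sdio_dmux_write(0, skb);
 *
 * receive_cb is called with a NULL skb for status-only notifications
 * (remote open, reset) and with a valid skb for received data; write_done
 * hands each queued skb back to the client once the driver is done with it.
 */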

int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}

static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}

int msm_sdio_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

int msm_sdio_dmux_is_ch_low(uint32_t id)
{
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, sdio_ch_is_local_open(j) ? "Y" : "N",
			       sdio_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_workqueue = create_singlethread_workqueue("sdio_dmux");
		if (!sdio_mux_workqueue)
			return -ENOMEM;

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);


		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sdio open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	fatal_error = 0;
	sdio_mux_initialized = 1;
	return 0;
}

static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	/* cancel any pending writes */
	spin_lock_irqsave(&sdio_mux_write_lock, write_lock_flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		i = ((struct sdio_mux_hdr *)skb->data)->ch_id;
		if (sdio_ch[i].write_done)
			sdio_ch[i].write_done(
					sdio_ch[i].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock,
			       write_lock_flags);

	return 0;
}

static struct platform_driver sdio_dmux_driver = {
	.probe = sdio_dmux_probe,
	.remove = sdio_dmux_remove,
	.driver = {
		.name = "SDIO_RMNT",
		.owner = THIS_MODULE,
	},
};

static int __init sdio_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&sdio_dmux_driver);
}

module_init(sdio_dmux_init);
MODULE_DESCRIPTION("MSM SDIO DMUX");
MODULE_LICENSE("GPL v2");