/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * SDIO DMUX module.
 */

#define DEBUG

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>

#include <mach/sdio_al.h>
#include <mach/sdio_dmux.h>

#define SDIO_CH_LOCAL_OPEN	0x1
#define SDIO_CH_REMOTE_OPEN	0x2
#define SDIO_CH_IN_RESET	0x4

#define SDIO_MUX_HDR_MAGIC_NO	0x33fc

#define SDIO_MUX_HDR_CMD_DATA	0
#define SDIO_MUX_HDR_CMD_OPEN	1
#define SDIO_MUX_HDR_CMD_CLOSE	2

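/*
 * Per-channel TX flow control: once a client opts in (use_wm), writes are
 * rejected with -EAGAIN while num_tx_pkts is at or above HIGH_WATERMARK,
 * and msm_sdio_dmux_is_ch_low() reports when the queue has drained back
 * down to LOW_WATERMARK so the client can resume writing.
 */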
#define LOW_WATERMARK		2
#define HIGH_WATERMARK		4

static int msm_sdio_dmux_debug_enable;
module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
static uint32_t sdio_dmux_read_cnt;
static uint32_t sdio_dmux_write_cnt;
static uint32_t sdio_dmux_write_cpy_cnt;
static uint32_t sdio_dmux_write_cpy_bytes;

#define DBG(x...) do {						\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug(x);				\
	} while (0)

#define DBG_INC_READ_CNT(x) do {				\
		sdio_dmux_read_cnt += (x);			\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug("%s: total read bytes %u\n",	\
				 __func__, sdio_dmux_read_cnt);	\
	} while (0)

#define DBG_INC_WRITE_CNT(x) do {				\
		sdio_dmux_write_cnt += (x);			\
		if (msm_sdio_dmux_debug_enable)			\
			pr_debug("%s: total written bytes %u\n", \
				 __func__, sdio_dmux_write_cnt); \
	} while (0)

#define DBG_INC_WRITE_CPY(x) do {					\
		sdio_dmux_write_cpy_bytes += (x);			\
		sdio_dmux_write_cpy_cnt++;				\
		if (msm_sdio_dmux_debug_enable)				\
			pr_debug("%s: total write copy cnt %u, bytes %u\n", \
				 __func__, sdio_dmux_write_cpy_cnt,	\
				 sdio_dmux_write_cpy_bytes);		\
	} while (0)
#else
#define DBG(x...) do { } while (0)
#define DBG_INC_READ_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CNT(x...) do { } while (0)
#define DBG_INC_WRITE_CPY(x...) do { } while (0)
#endif

struct sdio_ch_info {
	uint32_t status;
	void (*receive_cb)(void *, struct sk_buff *);
	void (*write_done)(void *, struct sk_buff *);
	void *priv;
	spinlock_t lock;
	int num_tx_pkts;
	int use_wm;
};

static struct sk_buff_head sdio_mux_write_pool;
static spinlock_t sdio_mux_write_lock;

static struct sdio_channel *sdio_mux_ch;
static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
struct wake_lock sdio_mux_ch_wakelock;
static int sdio_mux_initialized;
static int fatal_error;

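/*
 * Every packet on the SDIO_RMNT pipe is preceded by this fixed-size mux
 * header: a magic number for framing sanity checks, a command (DATA, OPEN
 * or CLOSE), the logical channel id, the payload length, and the number of
 * padding bytes appended to keep the total transfer 32-bit aligned.
 */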
struct sdio_mux_hdr {
	uint16_t magic_num;
	uint8_t reserved;
	uint8_t cmd;
	uint8_t pad_len;
	uint8_t ch_id;
	uint16_t pkt_len;
};

struct sdio_partial_pkt_info {
	uint32_t valid;
	struct sk_buff *skb;
	struct sdio_mux_hdr *hdr;
};

static void sdio_mux_read_data(struct work_struct *work);
static void sdio_mux_write_data(struct work_struct *work);
static void sdio_mux_send_open_cmd(uint32_t id);

static DEFINE_MUTEX(sdio_read_mux_lock);
static DEFINE_MUTEX(sdio_write_mux_lock);
static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);

static struct workqueue_struct *sdio_mux_read_workqueue;
static struct workqueue_struct *sdio_mux_write_workqueue;
static struct sdio_partial_pkt_info sdio_partial_pkt;

#define sdio_ch_is_open(x) \
	(sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))

#define sdio_ch_is_local_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)

#define sdio_ch_is_remote_open(x) \
	(sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)

#define sdio_ch_is_in_reset(x) \
	(sdio_ch[(x)].status & SDIO_CH_IN_RESET)

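/*
 * Point an skb's data/tail/len at an arbitrary region inside an already
 * allocated buffer. Used on clones of the large mux skb so each demuxed
 * packet can be handed to a client without copying the payload.
 */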
static inline void skb_set_data(struct sk_buff *skb,
				unsigned char *data,
				unsigned int len)
{
	/* panic if tail > end */
	skb->data = data;
	skb->tail = skb->data + len;
	skb->len = len;
	skb->truesize = len + sizeof(struct sk_buff);
}

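/*
 * A mux packet may straddle two SDIO reads. When the tail of the current
 * read buffer ends mid-packet, the remainder (starting at its header) is
 * stashed here and prepended to the next read buffer by
 * handle_sdio_partial_pkt().
 */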
static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
				      struct sk_buff *skb_mux)
{
	struct sk_buff *skb;

	/* i think we can avoid cloning here */
	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		return;
	}

	/* protect? */
	skb_set_data(skb, (unsigned char *)hdr,
		     skb->tail - (unsigned char *)hdr);
	sdio_partial_pkt.skb = skb;
	sdio_partial_pkt.valid = 1;
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb->head, skb->data, skb->tail, skb->end, skb->len);
	return;
}

static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("%s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}

static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
				     struct sk_buff *skb_mux)
{
	void *rp;
	unsigned long flags;
	int send_open = 0;

	DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
	switch (hdr->cmd) {
	case SDIO_MUX_HDR_CMD_DATA:
		rp = handle_sdio_mux_data(hdr, skb_mux);
		break;
	case SDIO_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;

		if (sdio_ch_is_in_reset(hdr->ch_id)) {
			DBG("%s: in reset - sending open cmd\n", __func__);
			sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
			send_open = 1;
		}

		/* notify client so it can update its status */
		if (sdio_ch[hdr->ch_id].receive_cb)
			sdio_ch[hdr->ch_id].receive_cb(
					sdio_ch[hdr->ch_id].priv, NULL);

		if (sdio_ch[hdr->ch_id].write_done)
			sdio_ch[hdr->ch_id].write_done(
					sdio_ch[hdr->ch_id].priv, NULL);
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		if (send_open)
			sdio_mux_send_open_cmd(hdr->ch_id);

		break;
	case SDIO_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
		sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
		rp = hdr + 1;
		break;
	default:
		rp = hdr + 1;
	}

	return rp;
}

static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
{
	struct sk_buff *p_skb;
	struct sdio_mux_hdr *p_hdr;
	void *ptr, *rp = skb_mux->data;
	/* protect? */
	if (sdio_partial_pkt.valid) {
		p_skb = sdio_partial_pkt.skb;

		ptr = skb_push(skb_mux, p_skb->len);
		memcpy(ptr, p_skb->data, p_skb->len);
		sdio_partial_pkt.skb = NULL;
		sdio_partial_pkt.valid = 0;
		dev_kfree_skb_any(p_skb);

		DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
		    skb_mux->head, skb_mux->data, skb_mux->tail,
		    skb_mux->end, skb_mux->len);

		p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
		rp = handle_sdio_mux_command(p_hdr, skb_mux);
	}
	return rp;
}

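/*
 * Read worker: pulls whatever sdio_al has buffered into one large skb,
 * prepends any partial packet left over from the previous read, then walks
 * the buffer header by header, dispatching each mux packet to its channel.
 * The work item re-queues itself after every pass.
 */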
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_read_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_read_mux_lock);
		return;
	}

	/* net_ip_align is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_read_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_read_mux_lock);
		queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_read_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
}

static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	mutex_lock(&sdio_write_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("%s: write returned %d\n", __func__, rc);
		if (rc == 0)
			DBG_INC_WRITE_CNT(skb->len);
	} else
		rc = -ENOMEM;

	mutex_unlock(&sdio_write_mux_lock);
	return rc;
}

static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
		mutex_lock(&sdio_write_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("%s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("%s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_write_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_write_mux_lock);
	return 0;
}

static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

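/*
 * Write worker: drains the shared sdio_mux_write_pool. If sdio_al does not
 * have room for the next skb (or the write returns -EAGAIN/-ENOMEM), the
 * skb is put back at the head of the pool and the work is rescheduled
 * 250 ms later; -ENODEV marks a fatal error and flushes all pending writes.
 */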
static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
						sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			dev_kfree_skb_any(skb);
			while ((skb = __skb_dequeue(&sdio_mux_write_pool)))
				dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the
			 * skb and reschedule for the
			 * other skb's
			 */
			pr_err("%s: sdio_mux_write error %d"
				" for ch %d, skb=%p\n",
				__func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_write_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250)
					   );
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}

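/*
 * msm_sdio_is_channel_in_reset() - report whether a DMUX channel is in the
 * reset state set when the underlying SDIO device is removed. Returns 1 if
 * so, 0 otherwise, or -EINVAL for an invalid channel id.
 */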
int msm_sdio_is_channel_in_reset(uint32_t id)
{
	int rc = 0;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	if (sdio_ch_is_in_reset(id))
		rc = 1;

	return rc;
}

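/*
 * msm_sdio_dmux_write() - queue an skb for transmission on a DMUX channel.
 * The caller's skb is prefixed with a mux header (and padded to a 4-byte
 * boundary), placed on the shared write pool and sent asynchronously by the
 * write worker. Returns 0 on success, -EINVAL for bad arguments, -ENODEV if
 * the mux is uninitialized, the channel is not open or a fatal error has
 * occurred, -ENETRESET if the channel is in reset, -EAGAIN once the high
 * watermark is reached, or -ENOMEM if the padded copy cannot be allocated.
 */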
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	/* if the skb does not have enough tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy would be more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_KERNEL);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		DBG_INC_WRITE_CPY(skb->len);
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	__skb_queue_tail(&sdio_mux_write_pool, skb);
	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);

	queue_work(sdio_mux_write_workqueue, &work_sdio_mux_write);

	return rc;
}

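/*
 * msm_sdio_dmux_open() - open a DMUX channel for the local side and send an
 * OPEN command to the remote side. receive_cb is invoked for every demuxed
 * packet (and with a NULL skb on remote open/reset events); write_done is
 * invoked when a queued skb has been consumed (transmitted or dropped).
 * Returns 0 on success, -EINVAL for a bad channel id, -ENODEV if the mux is
 * not yet initialized.
 */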
int msm_sdio_dmux_open(uint32_t id, void *priv,
		       void (*receive_cb)(void *, struct sk_buff *),
		       void (*write_done)(void *, struct sk_buff *))
{
	unsigned long flags;

	DBG("%s: opening ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("%s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].num_tx_pkts = 0;
	sdio_ch[id].use_wm = 0;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	sdio_mux_send_open_cmd(id);

open_done:
	pr_info("%s: opened ch %d\n", __func__, id);
	return 0;
}

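/*
 * msm_sdio_dmux_close() - close the local side of a DMUX channel, clear its
 * callbacks and send a CLOSE command to the remote side. Returns 0 on
 * success, -EINVAL for a bad channel id, -ENODEV if the mux is not
 * initialized.
 */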
int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}

static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work(sdio_mux_write_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
}

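/*
 * msm_sdio_dmux_is_ch_full() - enable watermark-based flow control on the
 * channel and report whether its TX queue has reached HIGH_WATERMARK.
 * Returns non-zero when the caller should stop writing, 0 otherwise, or
 * -ENODEV/-EINVAL on error.
 */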
int msm_sdio_dmux_is_ch_full(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

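/*
 * msm_sdio_dmux_is_ch_low() - enable watermark-based flow control on the
 * channel and report whether its TX queue has drained to LOW_WATERMARK or
 * below, i.e. whether the caller may resume writing. Returns -ENODEV or
 * -EINVAL on error.
 */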
int msm_sdio_dmux_is_ch_low(uint32_t id)
{
	unsigned long flags;
	int ret;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	sdio_ch[id].use_wm = 1;
	ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
	DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
	    id, sdio_ch[id].num_tx_pkts, ret);
	if (!sdio_ch_is_local_open(id)) {
		ret = -ENODEV;
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static int debug_tbl(char *buf, int max)
{
	int i = 0;
	int j;

	for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
		i += scnprintf(buf + i, max - i,
			       "ch%02d local open=%s remote open=%s\n",
			       j, sdio_ch_is_local_open(j) ? "Y" : "N",
			       sdio_ch_is_remote_open(j) ? "Y" : "N");
	}

	return i;
}

#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}


static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};

static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

#endif

static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_read_workqueue = create_singlethread_workqueue(
						"sdio_dmux_read");
		if (!sdio_mux_read_workqueue)
			return -ENOMEM;

		sdio_mux_write_workqueue = create_singlethread_workqueue(
						"sdio_dmux_write");
		if (!sdio_mux_write_workqueue) {
			destroy_workqueue(sdio_mux_read_workqueue);
			return -ENOMEM;
		}

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);


		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
840 pr_err("%s: sido open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_read_workqueue);
		destroy_workqueue(sdio_mux_write_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	sdio_mux_initialized = 1;
	return 0;
}

static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);

			/* cancel any pending writes */
			spin_lock_irqsave(&sdio_mux_write_lock,
					  write_lock_flags);
			while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
				if (sdio_ch[i].write_done)
					sdio_ch[i].write_done(
							sdio_ch[i].priv, skb);
				else
					dev_kfree_skb_any(skb);
			}
			spin_unlock_irqrestore(&sdio_mux_write_lock,
					       write_lock_flags);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	return 0;
}

static struct platform_driver sdio_dmux_driver = {
	.probe		= sdio_dmux_probe,
	.remove		= sdio_dmux_remove,
	.driver		= {
		.name	= "SDIO_RMNT",
		.owner	= THIS_MODULE,
	},
};

static int __init sdio_dmux_init(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *dent;

	dent = debugfs_create_dir("sdio_dmux", 0);
	if (!IS_ERR(dent))
		debug_create("tbl", 0444, dent, debug_tbl);
#endif
	return platform_driver_register(&sdio_dmux_driver);
}

module_init(sdio_dmux_init);
MODULE_DESCRIPTION("MSM SDIO DMUX");
MODULE_LICENSE("GPL v2");