blob: 77af7c379cdf60ca8dad606d2132b26064a1f306 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/*
15 * SDIO DMUX module.
16 */
17
18#define DEBUG
19
20#include <linux/delay.h>
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/platform_device.h>
24#include <linux/sched.h>
25#include <linux/skbuff.h>
26#include <linux/wakelock.h>
27#include <linux/debugfs.h>
28
29#include <mach/sdio_al.h>
30#include <mach/sdio_dmux.h>
31
32#define SDIO_CH_LOCAL_OPEN 0x1
33#define SDIO_CH_REMOTE_OPEN 0x2
34#define SDIO_CH_IN_RESET 0x4
35
36#define SDIO_MUX_HDR_MAGIC_NO 0x33fc
37
38#define SDIO_MUX_HDR_CMD_DATA 0
39#define SDIO_MUX_HDR_CMD_OPEN 1
40#define SDIO_MUX_HDR_CMD_CLOSE 2
41
42#define LOW_WATERMARK 2
43#define HIGH_WATERMARK 4
44
45static int msm_sdio_dmux_debug_enable;
46module_param_named(debug_enable, msm_sdio_dmux_debug_enable,
47 int, S_IRUGO | S_IWUSR | S_IWGRP);
48
49#if defined(DEBUG)
50static uint32_t sdio_dmux_read_cnt;
51static uint32_t sdio_dmux_write_cnt;
52static uint32_t sdio_dmux_write_cpy_cnt;
53static uint32_t sdio_dmux_write_cpy_bytes;
54
55#define DBG(x...) do { \
56 if (msm_sdio_dmux_debug_enable) \
57 pr_debug(x); \
58 } while (0)
59
60#define DBG_INC_READ_CNT(x) do { \
61 sdio_dmux_read_cnt += (x); \
62 if (msm_sdio_dmux_debug_enable) \
63 pr_debug("%s: total read bytes %u\n", \
64 __func__, sdio_dmux_read_cnt); \
65 } while (0)
66
67#define DBG_INC_WRITE_CNT(x) do { \
68 sdio_dmux_write_cnt += (x); \
69 if (msm_sdio_dmux_debug_enable) \
70 pr_debug("%s: total written bytes %u\n", \
71 __func__, sdio_dmux_write_cnt); \
72 } while (0)
73
74#define DBG_INC_WRITE_CPY(x) do { \
75 sdio_dmux_write_cpy_bytes += (x); \
76 sdio_dmux_write_cpy_cnt++; \
77 if (msm_sdio_dmux_debug_enable) \
78 pr_debug("%s: total write copy cnt %u, bytes %u\n", \
79 __func__, sdio_dmux_write_cpy_cnt, \
80 sdio_dmux_write_cpy_bytes); \
81 } while (0)
82#else
83#define DBG(x...) do { } while (0)
84#define DBG_INC_READ_CNT(x...) do { } while (0)
85#define DBG_INC_WRITE_CNT(x...) do { } while (0)
86#define DBG_INC_WRITE_CPY(x...) do { } while (0)
87#endif
88
89struct sdio_ch_info {
90 uint32_t status;
91 void (*receive_cb)(void *, struct sk_buff *);
92 void (*write_done)(void *, struct sk_buff *);
93 void *priv;
94 spinlock_t lock;
95 int num_tx_pkts;
96 int use_wm;
97};
98
99static struct sk_buff_head sdio_mux_write_pool;
100static spinlock_t sdio_mux_write_lock;
101
102static struct sdio_channel *sdio_mux_ch;
103static struct sdio_ch_info sdio_ch[SDIO_DMUX_NUM_CHANNELS];
104struct wake_lock sdio_mux_ch_wakelock;
105static int sdio_mux_initialized;
106static int fatal_error;
107
108struct sdio_mux_hdr {
109 uint16_t magic_num;
110 uint8_t reserved;
111 uint8_t cmd;
112 uint8_t pad_len;
113 uint8_t ch_id;
114 uint16_t pkt_len;
115};
116
117struct sdio_partial_pkt_info {
118 uint32_t valid;
119 struct sk_buff *skb;
120 struct sdio_mux_hdr *hdr;
121};
122
123static void sdio_mux_read_data(struct work_struct *work);
124static void sdio_mux_write_data(struct work_struct *work);
125static void sdio_mux_send_open_cmd(uint32_t id);
126
Eric Holmberg7835f312011-06-09 17:58:56 -0600127static DEFINE_MUTEX(sdio_read_mux_lock);
128static DEFINE_MUTEX(sdio_write_mux_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129static DECLARE_WORK(work_sdio_mux_read, sdio_mux_read_data);
130static DECLARE_WORK(work_sdio_mux_write, sdio_mux_write_data);
131static DECLARE_DELAYED_WORK(delayed_work_sdio_mux_write, sdio_mux_write_data);
132
Eric Holmberg32edc1d2011-06-01 14:20:08 -0600133static struct workqueue_struct *sdio_mux_read_workqueue;
134static struct workqueue_struct *sdio_mux_write_workqueue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135static struct sdio_partial_pkt_info sdio_partial_pkt;
136
137#define sdio_ch_is_open(x) \
138 (sdio_ch[(x)].status == (SDIO_CH_LOCAL_OPEN | SDIO_CH_REMOTE_OPEN))
139
140#define sdio_ch_is_local_open(x) \
141 (sdio_ch[(x)].status & SDIO_CH_LOCAL_OPEN)
142
143#define sdio_ch_is_remote_open(x) \
144 (sdio_ch[(x)].status & SDIO_CH_REMOTE_OPEN)
145
146#define sdio_ch_is_in_reset(x) \
147 (sdio_ch[(x)].status & SDIO_CH_IN_RESET)
148
149static inline void skb_set_data(struct sk_buff *skb,
150 unsigned char *data,
151 unsigned int len)
152{
153 /* panic if tail > end */
154 skb->data = data;
155 skb->tail = skb->data + len;
156 skb->len = len;
157 skb->truesize = len + sizeof(struct sk_buff);
158}
159
160static void sdio_mux_save_partial_pkt(struct sdio_mux_hdr *hdr,
161 struct sk_buff *skb_mux)
162{
163 struct sk_buff *skb;
164
165 /* i think we can avoid cloning here */
166 skb = skb_clone(skb_mux, GFP_KERNEL);
167 if (!skb) {
168 pr_err("%s: cannot clone skb\n", __func__);
169 return;
170 }
171
172 /* protect? */
173 skb_set_data(skb, (unsigned char *)hdr,
174 skb->tail - (unsigned char *)hdr);
175 sdio_partial_pkt.skb = skb;
176 sdio_partial_pkt.valid = 1;
177 DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
178 skb->head, skb->data, skb->tail, skb->end, skb->len);
179 return;
180}
181
182static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
183 struct sk_buff *skb_mux)
184{
185 struct sk_buff *skb;
186 void *rp = (void *)hdr;
187 unsigned long flags;
188
189 /* protect? */
190 rp += sizeof(*hdr);
191 if (rp < (void *)skb_mux->tail)
192 rp += (hdr->pkt_len + hdr->pad_len);
193
194 if (rp > (void *)skb_mux->tail) {
195 /* partial packet */
196 sdio_mux_save_partial_pkt(hdr, skb_mux);
197 goto packet_done;
198 }
199
200 DBG("%s: hdr %p next %p tail %p pkt_size %d\n",
201 __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);
202
203 skb = skb_clone(skb_mux, GFP_KERNEL);
204 if (!skb) {
205 pr_err("%s: cannot clone skb\n", __func__);
206 goto packet_done;
207 }
208
209 skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
210 DBG("%s: head %p data %p tail %p end %p len %d\n",
211 __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);
212
213 /* probably we should check channel status */
214 /* discard packet early if local side not open */
215 spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
216 if (sdio_ch[hdr->ch_id].receive_cb)
217 sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
218 else
219 dev_kfree_skb_any(skb);
220 spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
221
222packet_done:
223 return rp;
224}
225
226static void *handle_sdio_mux_command(struct sdio_mux_hdr *hdr,
227 struct sk_buff *skb_mux)
228{
229 void *rp;
230 unsigned long flags;
231 int send_open = 0;
232
233 DBG("%s: cmd %d ch %d\n", __func__, hdr->cmd, hdr->ch_id);
234 switch (hdr->cmd) {
235 case SDIO_MUX_HDR_CMD_DATA:
236 rp = handle_sdio_mux_data(hdr, skb_mux);
237 break;
238 case SDIO_MUX_HDR_CMD_OPEN:
239 spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
240 sdio_ch[hdr->ch_id].status |= SDIO_CH_REMOTE_OPEN;
Eric Holmberg0d0de822011-09-16 11:28:06 -0600241 sdio_ch[hdr->ch_id].num_tx_pkts = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242
243 if (sdio_ch_is_in_reset(hdr->ch_id)) {
244 DBG("%s: in reset - sending open cmd\n", __func__);
245 sdio_ch[hdr->ch_id].status &= ~SDIO_CH_IN_RESET;
246 send_open = 1;
247 }
248
249 /* notify client so it can update its status */
250 if (sdio_ch[hdr->ch_id].receive_cb)
251 sdio_ch[hdr->ch_id].receive_cb(
252 sdio_ch[hdr->ch_id].priv, NULL);
253
254 if (sdio_ch[hdr->ch_id].write_done)
255 sdio_ch[hdr->ch_id].write_done(
256 sdio_ch[hdr->ch_id].priv, NULL);
257 spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
258 rp = hdr + 1;
259 if (send_open)
260 sdio_mux_send_open_cmd(hdr->ch_id);
261
262 break;
263 case SDIO_MUX_HDR_CMD_CLOSE:
264 /* probably should drop pending write */
265 spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
266 sdio_ch[hdr->ch_id].status &= ~SDIO_CH_REMOTE_OPEN;
267 spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);
268 rp = hdr + 1;
269 break;
270 default:
271 rp = hdr + 1;
272 }
273
274 return rp;
275}
276
277static void *handle_sdio_partial_pkt(struct sk_buff *skb_mux)
278{
279 struct sk_buff *p_skb;
280 struct sdio_mux_hdr *p_hdr;
281 void *ptr, *rp = skb_mux->data;
282
283 /* protoect? */
284 if (sdio_partial_pkt.valid) {
285 p_skb = sdio_partial_pkt.skb;
286
287 ptr = skb_push(skb_mux, p_skb->len);
288 memcpy(ptr, p_skb->data, p_skb->len);
289 sdio_partial_pkt.skb = NULL;
290 sdio_partial_pkt.valid = 0;
291 dev_kfree_skb_any(p_skb);
292
293 DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
294 skb_mux->head, skb_mux->data, skb_mux->tail,
295 skb_mux->end, skb_mux->len);
296
297 p_hdr = (struct sdio_mux_hdr *)skb_mux->data;
298 rp = handle_sdio_mux_command(p_hdr, skb_mux);
299 }
300 return rp;
301}
302
/*
 * Read worker: drain one chunk of aggregated mux traffic from the SDIO
 * channel, splice in any saved partial packet, then walk the buffer
 * delivering each embedded packet/command.  Re-queues itself after every
 * successful pass (and after a failed sdio_read) so reading continues
 * until sdio_read_avail() reports nothing pending.
 */
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_read_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_read_mux_lock);
		return;
	}

	/* net_ip_aling is probably not required */
	/* len = size of the partial packet that must be prepended */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_read_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	/* reserve headroom so handle_sdio_partial_pkt can skb_push len */
	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_read_mux_lock);
		/* retry on the workqueue rather than giving up */
		queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_read_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			/* stream is corrupt - stop parsing this buffer */
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	/* clients cloned what they needed; drop the aggregate buffer */
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
}
392
393static int sdio_mux_write(struct sk_buff *skb)
394{
395 int rc, sz;
396
Eric Holmberg7835f312011-06-09 17:58:56 -0600397 mutex_lock(&sdio_write_mux_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398 sz = sdio_write_avail(sdio_mux_ch);
399 DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
400 if (skb->len <= sz) {
401 rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
402 DBG("%s: write returned %d\n", __func__, rc);
403 if (rc == 0)
404 DBG_INC_WRITE_CNT(skb->len);
405 } else
406 rc = -ENOMEM;
407
Eric Holmberg7835f312011-06-09 17:58:56 -0600408 mutex_unlock(&sdio_write_mux_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409 return rc;
410}
411
412static int sdio_mux_write_cmd(void *data, uint32_t len)
413{
414 int avail, rc;
415 for (;;) {
Eric Holmberg7835f312011-06-09 17:58:56 -0600416 mutex_lock(&sdio_write_mux_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417 avail = sdio_write_avail(sdio_mux_ch);
418 DBG("%s: avail %d len %d\n", __func__, avail, len);
419 if (avail >= len) {
420 rc = sdio_write(sdio_mux_ch, data, len);
421 DBG("%s: write returned %d\n", __func__, rc);
422 if (!rc) {
423 DBG_INC_WRITE_CNT(len);
424 break;
425 }
426 }
Eric Holmberg7835f312011-06-09 17:58:56 -0600427 mutex_unlock(&sdio_write_mux_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700428 msleep(250);
429 }
Eric Holmberg7835f312011-06-09 17:58:56 -0600430 mutex_unlock(&sdio_write_mux_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700431 return 0;
432}
433
434static void sdio_mux_send_open_cmd(uint32_t id)
435{
436 struct sdio_mux_hdr hdr = {
437 .magic_num = SDIO_MUX_HDR_MAGIC_NO,
438 .cmd = SDIO_MUX_HDR_CMD_OPEN,
439 .reserved = 0,
440 .ch_id = id,
441 .pkt_len = 0,
442 .pad_len = 0
443 };
444
445 sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
446}
447
/*
 * Write worker: drain the pending-write pool, sending each queued skb to
 * the SDIO channel.  The pool lock is dropped around the actual write
 * (sdio_mux_write takes a mutex and may sleep) and retaken afterwards.
 * On a recoverable error the current skb is requeued and the worker is
 * rescheduled with a 250 ms delay; on -ENODEV all pending writes are
 * dropped and fatal_error blocks future writes; on any other error the
 * skb is completed back to (or freed for) the client via the notify path.
 */
static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		/* the mux header was prepended by msm_sdio_dmux_write */
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
				__func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		/* drop the pool lock: sdio_mux_write may sleep */
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {

			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			/* hand the skb back to the client, or free it */
			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
						skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the
			 * skb and reschedule for the
			 * other skb's
			 */
			pr_err("%s: sdio_mux_write error %d"
				" for ch %d, skb=%p\n",
				__func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			/* channel went down - complete the skb instead */
			notify = 1;
		} else {
			/* requeue at the head to preserve ordering */
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_write_workqueue,
					&delayed_work_sdio_mux_write,
					msecs_to_jiffies(250)
					);
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
				sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}
544
545int msm_sdio_is_channel_in_reset(uint32_t id)
546{
547 int rc = 0;
548
549 if (id >= SDIO_DMUX_NUM_CHANNELS)
550 return -EINVAL;
551
552 if (sdio_ch_is_in_reset(id))
553 rc = 1;
554
555 return rc;
556}
557
/*
 * Queue an skb for transmission on mux channel id.  The skb is given a
 * mux header and padded to a 4-byte boundary, placed on the shared write
 * pool, and the write worker is kicked.  On success ownership of the skb
 * passes to the mux (it is returned via write_done or freed).
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ENODEV if the mux or
 * channel is not up, -ENETRESET if the channel is in reset, -EAGAIN when
 * the high watermark is hit (only for channels using watermarks), or
 * -ENOMEM if a padded copy could not be allocated.
 */
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
		       sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	/* flow control only applies once the client queried a watermark */
	if (sdio_ch[id].use_wm &&
	    (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	/* if skb do not have any tailroom for padding,
	   copy the skb into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit, probably dev_alloc_skb and memcpy is effecient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_KERNEL);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		DBG_INC_WRITE_CPY(skb->len);
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	}

	/* NOTE(review): headroom for the mux header is not checked here -
	 * callers are presumably required to reserve sizeof(struct
	 * sdio_mux_hdr) of headroom; confirm against the callers. */
	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate for hdr and padding
	   hdr is fine, padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	__skb_queue_tail(&sdio_mux_write_pool, skb);
	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);

	queue_work(sdio_mux_write_workqueue, &work_sdio_mux_write);

	return rc;
}
641
642int msm_sdio_dmux_open(uint32_t id, void *priv,
643 void (*receive_cb)(void *, struct sk_buff *),
644 void (*write_done)(void *, struct sk_buff *))
645{
646 unsigned long flags;
647
648 DBG("%s: opening ch %d\n", __func__, id);
649 if (!sdio_mux_initialized)
650 return -ENODEV;
651 if (id >= SDIO_DMUX_NUM_CHANNELS)
652 return -EINVAL;
653
654 spin_lock_irqsave(&sdio_ch[id].lock, flags);
655 if (sdio_ch_is_local_open(id)) {
656 pr_info("%s: Already opened %d\n", __func__, id);
657 spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
658 goto open_done;
659 }
660
661 sdio_ch[id].receive_cb = receive_cb;
662 sdio_ch[id].write_done = write_done;
663 sdio_ch[id].priv = priv;
664 sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
665 sdio_ch[id].num_tx_pkts = 0;
666 sdio_ch[id].use_wm = 0;
667 spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
668
669 sdio_mux_send_open_cmd(id);
670
671open_done:
672 pr_info("%s: opened ch %d\n", __func__, id);
673 return 0;
674}
675
676int msm_sdio_dmux_close(uint32_t id)
677{
678 struct sdio_mux_hdr hdr;
679 unsigned long flags;
680
681 if (id >= SDIO_DMUX_NUM_CHANNELS)
682 return -EINVAL;
683 DBG("%s: closing ch %d\n", __func__, id);
684 if (!sdio_mux_initialized)
685 return -ENODEV;
686 spin_lock_irqsave(&sdio_ch[id].lock, flags);
687
688 sdio_ch[id].receive_cb = NULL;
689 sdio_ch[id].priv = NULL;
690 sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
691 sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
692 spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
693
694 hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
695 hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
696 hdr.reserved = 0;
697 hdr.ch_id = id;
698 hdr.pkt_len = 0;
699 hdr.pad_len = 0;
700
701 sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
702
703 pr_info("%s: closed ch %d\n", __func__, id);
704 return 0;
705}
706
707static void sdio_mux_notify(void *_dev, unsigned event)
708{
709 DBG("%s: event %d notified\n", __func__, event);
710
711 /* write avail may not be enouogh for a packet, but should be fine */
712 if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
713 sdio_write_avail(sdio_mux_ch))
Eric Holmberg32edc1d2011-06-01 14:20:08 -0600714 queue_work(sdio_mux_write_workqueue, &work_sdio_mux_write);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700715
716 if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
717 sdio_read_avail(sdio_mux_ch))
Eric Holmberg32edc1d2011-06-01 14:20:08 -0600718 queue_work(sdio_mux_read_workqueue, &work_sdio_mux_read);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700719}
720
721int msm_sdio_dmux_is_ch_full(uint32_t id)
722{
723 unsigned long flags;
724 int ret;
725
726 if (id >= SDIO_DMUX_NUM_CHANNELS)
727 return -EINVAL;
728
729 spin_lock_irqsave(&sdio_ch[id].lock, flags);
730 sdio_ch[id].use_wm = 1;
731 ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
732 DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
733 id, sdio_ch[id].num_tx_pkts, ret);
734 if (!sdio_ch_is_local_open(id)) {
735 ret = -ENODEV;
736 pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
737 }
738 spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
739
740 return ret;
741}
742
743int msm_sdio_dmux_is_ch_low(uint32_t id)
744{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745 int ret;
746
747 if (id >= SDIO_DMUX_NUM_CHANNELS)
748 return -EINVAL;
749
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700750 sdio_ch[id].use_wm = 1;
751 ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
752 DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
753 id, sdio_ch[id].num_tx_pkts, ret);
754 if (!sdio_ch_is_local_open(id)) {
755 ret = -ENODEV;
756 pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
757 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700758
759 return ret;
760}
761
762#ifdef CONFIG_DEBUG_FS
763
764static int debug_tbl(char *buf, int max)
765{
766 int i = 0;
767 int j;
768
769 for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
770 i += scnprintf(buf + i, max - i,
771 "ch%02d local open=%s remote open=%s\n",
772 j, sdio_ch_is_local_open(j) ? "Y" : "N",
773 sdio_ch_is_remote_open(j) ? "Y" : "N");
774 }
775
776 return i;
777}
778
779#define DEBUG_BUFMAX 4096
780static char debug_buffer[DEBUG_BUFMAX];
781
782static ssize_t debug_read(struct file *file, char __user *buf,
783 size_t count, loff_t *ppos)
784{
785 int (*fill)(char *buf, int max) = file->private_data;
786 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
787 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
788}
789
/* debugfs open: move the fill callback (stored as i_private by
 * debug_create) into private_data for debug_read. */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
795
796
/* File operations for the "tbl" debugfs entry.
 * NOTE(review): no .llseek is set; on kernels of this era that falls back
 * to default seek behavior - confirm whether default_llseek should be
 * assigned explicitly. */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
801
/* Create a debugfs file under dent whose content is produced on each
 * read by the given fill callback (passed via i_private). */
static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
808
809#endif
810
/*
 * Platform probe: on first probe, create the read/write workqueues, the
 * write pool and its lock, the per-channel locks and the wakelock; on
 * every probe, open the "SDIO_RMNT" channel with sdio_al.  If the open
 * fails, all first-probe resources are torn down and initialization is
 * rolled back so a later probe starts from scratch.
 */
static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_read_workqueue = create_singlethread_workqueue(
						"sdio_dmux_read");
		if (!sdio_mux_read_workqueue)
			return -ENOMEM;

		sdio_mux_write_workqueue = create_singlethread_workqueue(
						"sdio_dmux_write");
		if (!sdio_mux_write_workqueue) {
			destroy_workqueue(sdio_mux_read_workqueue);
			return -ENOMEM;
		}

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		/* rc doubles as the loop index here */
		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);


		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
			       "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sido open failed %d\n", __func__, rc);
		/* roll back so the next probe re-initializes everything */
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_read_workqueue);
		destroy_workqueue(sdio_mux_write_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	sdio_mux_initialized = 1;
	return 0;
}
854
/*
 * Platform remove: flag every open channel as in-reset (clients learn of
 * this via a NULL receive_cb notification) and complete or free all
 * writes still queued in the pool.  Workqueues and the wakelock are kept
 * for a future re-probe.
 * NOTE(review): num_tx_pkts is not decremented for the cancelled writes;
 * this appears to rely on the remote OPEN handler resetting it to 0 on
 * reconnect - confirm.
 */
static int sdio_dmux_remove(struct platform_device *pdev)
{
	int i;
	unsigned long ch_lock_flags;
	unsigned long write_lock_flags;
	struct sk_buff *skb;

	DBG("%s remove called\n", __func__);
	if (!sdio_mux_initialized)
		return 0;

	/* set reset state for any open channels */
	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, ch_lock_flags);
		if (sdio_ch_is_open(i)) {
			sdio_ch[i].status |= SDIO_CH_IN_RESET;
			sdio_ch[i].status &= ~SDIO_CH_REMOTE_OPEN;

			/* notify client so it can update its status */
			if (sdio_ch[i].receive_cb)
				sdio_ch[i].receive_cb(
						sdio_ch[i].priv, NULL);
		}
		spin_unlock_irqrestore(&sdio_ch[i].lock, ch_lock_flags);
	}

	/* cancel any pending writes */
	spin_lock_irqsave(&sdio_mux_write_lock, write_lock_flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		/* return each skb to its owner, or free it */
		i = ((struct sdio_mux_hdr *)skb->data)->ch_id;
		if (sdio_ch[i].write_done)
			sdio_ch[i].write_done(
					sdio_ch[i].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock,
			write_lock_flags);

	return 0;
}
896
/* Binds to the "SDIO_RMNT" platform device registered by sdio_al when
 * the remote mux channel becomes available. */
static struct platform_driver sdio_dmux_driver = {
	.probe		= sdio_dmux_probe,
	.remove		= sdio_dmux_remove,
	.driver		= {
		.name = "SDIO_RMNT",
		.owner = THIS_MODULE,
	},
};
905
906static int __init sdio_dmux_init(void)
907{
908#ifdef CONFIG_DEBUG_FS
909 struct dentry *dent;
910
911 dent = debugfs_create_dir("sdio_dmux", 0);
912 if (!IS_ERR(dent))
913 debug_create("tbl", 0444, dent, debug_tbl);
914#endif
915 return platform_driver_register(&sdio_dmux_driver);
916}
917
918module_init(sdio_dmux_init);
919MODULE_DESCRIPTION("MSM SDIO DMUX");
920MODULE_LICENSE("GPL v2");