/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/diagchar.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/msm_mhi.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/current.h>
#include <linux/atomic.h>
#include "diagmem.h"
#include "diagfwd_bridge.h"
#include "diagfwd_mhi.h"
#include "diag_ipc_logging.h"

#define SET_CH_CTXT(index, type)	((((index) & 0xFF) << 8) | ((type) & 0xFF))
#define GET_INFO_INDEX(val)		(((val) & 0xFF00) >> 8)
#define GET_CH_TYPE(val)		((val) & 0x00FF)
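/*
 * Illustrative example only (the numeric values of the channel-type
 * constants live in diagfwd_mhi.h): with index 1 and a type value of 2,
 * SET_CH_CTXT(1, 2) packs the pair into 0x0102; the callback recovers
 * the halves with GET_INFO_INDEX(0x0102) == 1 and GET_CH_TYPE(0x0102) == 2.
 */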

#define CHANNELS_OPENED 0
#define OPEN_CHANNELS 1
#define CHANNELS_CLOSED 0
#define CLOSE_CHANNELS 1

#define DIAG_MHI_STRING_SZ 11

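/*
 * Static table of the MHI devices Diag bridges to. Each entry pairs a
 * dedicated inbound (read) and outbound (write) MHI channel with the
 * diagfwd bridge device it feeds; the workqueue and mempool fields are
 * filled in at init time by diag_mhi_init().
 */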
struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
	{
		.id = MHI_1,
		.dev_id = DIAGFWD_MDM,
		.name = "MDM",
		.enabled = 0,
		.num_read = 0,
		.mempool = POOL_TYPE_MDM,
		.mempool_init = 0,
		.mhi_wq = NULL,
		.read_ch = {
			.chan = MHI_CLIENT_DIAG_IN,
			.type = TYPE_MHI_READ_CH,
			.hdl = NULL,
		},
		.write_ch = {
			.chan = MHI_CLIENT_DIAG_OUT,
			.type = TYPE_MHI_WRITE_CH,
			.hdl = NULL,
		}
	},
	{
		.id = MHI_DCI_1,
		.dev_id = DIAGFWD_MDM_DCI,
		.name = "MDM_DCI",
		.enabled = 0,
		.num_read = 0,
		.mempool = POOL_TYPE_MDM_DCI,
		.mempool_init = 0,
		.mhi_wq = NULL,
		.read_ch = {
			.chan = MHI_CLIENT_DCI_IN,
			.type = TYPE_MHI_READ_CH,
			.hdl = NULL,
		},
		.write_ch = {
			.chan = MHI_CLIENT_DCI_OUT,
			.type = TYPE_MHI_WRITE_CH,
			.hdl = NULL,
		}
	}
};

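/*
 * Open a single MHI channel if it is not open already. The ->opened flag
 * makes this idempotent, so it is safe to call from both the explicit
 * open path and the MHI_CB_MHI_ENABLED callback.
 */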
static int mhi_ch_open(struct diag_mhi_ch_t *ch)
{
	int err = 0;

	if (!ch)
		return -EINVAL;

	if (atomic_read(&ch->opened)) {
		pr_debug("diag: In %s, channel is already opened, id: %d\n",
			 __func__, ch->type);
		return 0;
	}
	err = mhi_open_channel(ch->hdl);
	if (err) {
		pr_err("diag: In %s, unable to open ch, type: %d, err: %d\n",
		       __func__, ch->type, err);
		return err;
	}

	atomic_set(&ch->opened, 1);
	INIT_LIST_HEAD(&ch->buf_tbl);
	return 0;
}

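/*
 * Track a buffer that is about to be handed to MHI on the given channel.
 * Every buffer queued via mhi_queue_xfer() gets an entry here so it can
 * be reclaimed on transfer completion or when the channel goes down.
 */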
static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
			   void *buf, int len)
{
	unsigned long flags;
	struct diag_mhi_buf_tbl_t *item;
	struct diag_mhi_ch_t *ch = NULL;

	if (!mhi_info || !buf || len < 0)
		return -EINVAL;

	switch (type) {
	case TYPE_MHI_READ_CH:
		ch = &mhi_info->read_ch;
		break;
	case TYPE_MHI_WRITE_CH:
		ch = &mhi_info->write_ch;
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
				   __func__, type);
		return -EINVAL;
	}

	item = kzalloc(sizeof(struct diag_mhi_buf_tbl_t), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	kmemleak_not_leak(item);

	spin_lock_irqsave(&ch->lock, flags);
	item->buf = buf;
	item->len = len;
	list_add_tail(&item->link, &ch->buf_tbl);
	spin_unlock_irqrestore(&ch->lock, flags);

	return 0;
}

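/*
 * Drop the tracking entry for a completed buffer. Read buffers are owned
 * by the diag mempool and are returned to it here; write buffers belong
 * to the bridge layer, which is notified of completions separately via
 * diag_remote_dev_write_done().
 */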
static void mhi_buf_tbl_remove(struct diag_mhi_info *mhi_info, int type,
			       void *buf, int len)
{
	int found = 0;
	unsigned long flags;
	struct list_head *start, *temp;
	struct diag_mhi_buf_tbl_t *item = NULL;
	struct diag_mhi_ch_t *ch = NULL;

	if (!mhi_info || !buf || len < 0)
		return;

	switch (type) {
	case TYPE_MHI_READ_CH:
		ch = &mhi_info->read_ch;
		break;
	case TYPE_MHI_WRITE_CH:
		ch = &mhi_info->write_ch;
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
				   __func__, type);
		return;
	}

	spin_lock_irqsave(&ch->lock, flags);
	list_for_each_safe(start, temp, &ch->buf_tbl) {
		item = list_entry(start, struct diag_mhi_buf_tbl_t, link);
		if (item->buf != buf)
			continue;
		list_del(&item->link);
		if (type == TYPE_MHI_READ_CH)
			diagmem_free(driver, item->buf, mhi_info->mempool);
		kfree(item);
		found = 1;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!found) {
		pr_err_ratelimited("diag: In %s, unable to find buffer, ch: %pK, type: %d, buf: %pK\n",
				   __func__, ch, ch->type, buf);
	}
}

static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
{
	unsigned long flags;
	struct list_head *start, *temp;
	struct diag_mhi_buf_tbl_t *item = NULL;
	struct diag_mhi_ch_t *ch = NULL;

	if (!mhi_info || !mhi_info->enabled)
		return;

	/* Clear all the pending reads */
	ch = &mhi_info->read_ch;
	/* At this point, the channel should already be closed */
	if (!(atomic_read(&ch->opened))) {
		spin_lock_irqsave(&ch->lock, flags);
		list_for_each_safe(start, temp, &ch->buf_tbl) {
			item = list_entry(start, struct diag_mhi_buf_tbl_t,
					  link);
			list_del(&item->link);
			diagmem_free(driver, item->buf, mhi_info->mempool);
			kfree(item);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* Clear all the pending writes */
	ch = &mhi_info->write_ch;
	/* At this point, the channel should already be closed */
	if (!(atomic_read(&ch->opened))) {
		spin_lock_irqsave(&ch->lock, flags);
		list_for_each_safe(start, temp, &ch->buf_tbl) {
			item = list_entry(start, struct diag_mhi_buf_tbl_t,
					  link);
			list_del(&item->link);
			diag_remote_dev_write_done(mhi_info->dev_id, item->buf,
						   item->len, mhi_info->id);
			kfree(item);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}

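/*
 * Tear down both channels of one MHI device. With CLOSE_CHANNELS the
 * channels are marked closed and shut down here; with CHANNELS_CLOSED the
 * caller (typically the MHI_CB_MHI_DISABLED path) has already cleared the
 * ->opened flags and this only flushes and reclaims what is left.
 */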
static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag)
{
	if (!mhi_info)
		return -EIO;

	if (!mhi_info->enabled)
		return -ENODEV;

	if (close_flag == CLOSE_CHANNELS) {
		atomic_set(&(mhi_info->read_ch.opened), 0);
		atomic_set(&(mhi_info->write_ch.opened), 0);
	}

	if (!(atomic_read(&(mhi_info->read_ch.opened)))) {
		flush_workqueue(mhi_info->mhi_wq);
		mhi_close_channel(mhi_info->read_ch.hdl);
	}

	if (!(atomic_read(&(mhi_info->write_ch.opened)))) {
		flush_workqueue(mhi_info->mhi_wq);
		mhi_close_channel(mhi_info->write_ch.hdl);
	}

	mhi_buf_tbl_clear(mhi_info);
	diag_remote_dev_close(mhi_info->dev_id);
	return 0;
}

static int mhi_close(int id)
{
	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err("diag: In %s, invalid index %d\n", __func__, id);
		return -EINVAL;
	}

	if (!diag_mhi[id].enabled)
		return -ENODEV;
	/*
	 * This function is called whenever the channel needs to be closed
	 * explicitly by Diag. Close both the read and write channels (denoted
	 * by the CLOSE_CHANNELS flag).
	 */
	return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
}

static void mhi_close_work_fn(struct work_struct *work)
{
	struct diag_mhi_info *mhi_info = container_of(work,
						      struct diag_mhi_info,
						      close_work);
	/*
	 * This work function is queued after the channels are explicitly
	 * closed. Do not close the channels again (denoted by the
	 * CHANNELS_CLOSED flag).
	 */
	if (mhi_info)
		__mhi_close(mhi_info, CHANNELS_CLOSED);
}

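/*
 * Bring both channels of one MHI device up. With OPEN_CHANNELS the
 * channels are opened here; with CHANNELS_OPENED the MHI callback has
 * already opened them and this only verifies that before marking the
 * device enabled and kicking off the first read.
 */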
static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
{
	int err = 0;
	unsigned long flags;

	if (!mhi_info)
		return -EIO;

	if (open_flag == OPEN_CHANNELS) {
		if (!atomic_read(&mhi_info->read_ch.opened)) {
			err = mhi_ch_open(&mhi_info->read_ch);
			if (err)
				goto fail;
			DIAG_LOG(DIAG_DEBUG_BRIDGE,
				 "opened mhi read channel, port: %d\n",
				 mhi_info->id);
		}
		if (!atomic_read(&mhi_info->write_ch.opened)) {
			err = mhi_ch_open(&mhi_info->write_ch);
			if (err)
				goto fail;
			DIAG_LOG(DIAG_DEBUG_BRIDGE,
				 "opened mhi write channel, port: %d\n",
				 mhi_info->id);
		}
	} else if (open_flag == CHANNELS_OPENED) {
		if (!atomic_read(&(mhi_info->read_ch.opened)) ||
		    !atomic_read(&(mhi_info->write_ch.opened))) {
			return -ENODEV;
		}
	}

	spin_lock_irqsave(&mhi_info->lock, flags);
	mhi_info->enabled = 1;
	spin_unlock_irqrestore(&mhi_info->lock, flags);
	diag_remote_dev_open(mhi_info->dev_id);
	queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
	return 0;

fail:
	pr_err("diag: Failed to open mhi channels, err: %d\n", err);
	mhi_close(mhi_info->id);
	return err;
}

static int mhi_open(int id)
{
	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err("diag: In %s, invalid index %d\n", __func__, id);
		return -EINVAL;
	}

	if (!diag_mhi[id].enabled)
		return -ENODEV;
	/*
	 * This function is called whenever the channel needs to be opened
	 * explicitly by Diag. Open both the read and write channels (denoted
	 * by the OPEN_CHANNELS flag). On success, __mhi_open() already
	 * notifies the bridge and queues the first read, so its result is
	 * returned directly instead of repeating those steps here.
	 */
	return __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
}

static void mhi_open_work_fn(struct work_struct *work)
{
	struct diag_mhi_info *mhi_info = container_of(work,
						      struct diag_mhi_info,
						      open_work);
	/*
	 * This work function is queued after the channels are explicitly
	 * opened. Do not open the channels again (denoted by the
	 * CHANNELS_OPENED flag).
	 */
	if (mhi_info) {
		diag_remote_dev_open(mhi_info->dev_id);
		queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
	}
}

static void mhi_read_done_work_fn(struct work_struct *work)
{
	unsigned char *buf = NULL;
	struct mhi_result result;
	int err = 0;
	struct diag_mhi_info *mhi_info = container_of(work,
						      struct diag_mhi_info,
						      read_done_work);
	if (!mhi_info)
		return;

	do {
		if (!(atomic_read(&(mhi_info->read_ch.opened))))
			break;
		err = mhi_poll_inbound(mhi_info->read_ch.hdl, &result);
		if (err) {
			pr_debug("diag: In %s, err %d\n", __func__, err);
			break;
		}
		buf = result.buf_addr;
		if (!buf)
			break;
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "read from mhi port %d buf %pK\n",
			 mhi_info->id, buf);
		/*
		 * The read buffers can come after the MHI channels are closed.
		 * If the channels are closed at the time of read, discard the
		 * buffers here and do not forward them to the mux layer.
		 */
		if ((atomic_read(&(mhi_info->read_ch.opened)))) {
			err = diag_remote_dev_read_done(mhi_info->dev_id, buf,
							result.bytes_xferd);
			if (err)
				mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH,
						   buf, result.bytes_xferd);
		} else {
			mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
					   result.bytes_xferd);
		}
	} while (buf);
}

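/*
 * Keep the inbound channel primed with empty buffers: allocate from the
 * device's mempool, record the buffer in the read table, then queue it to
 * MHI. The loop stops once the mempool runs dry or the channel closes;
 * completions arrive in mhi_read_done_work_fn() via the MHI_CB_XFER
 * callback.
 */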
static void mhi_read_work_fn(struct work_struct *work)
{
	int err = 0;
	unsigned char *buf = NULL;
	enum MHI_FLAGS mhi_flags = MHI_EOT;
	struct diag_mhi_ch_t *read_ch = NULL;
	unsigned long flags;
	struct diag_mhi_info *mhi_info = container_of(work,
						      struct diag_mhi_info,
						      read_work);
	if (!mhi_info)
		return;

	read_ch = &mhi_info->read_ch;
	do {
		if (!(atomic_read(&(read_ch->opened))))
			break;

		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
				    mhi_info->mempool);
		if (!buf)
			break;

		err = mhi_buf_tbl_add(mhi_info, TYPE_MHI_READ_CH, buf,
				      DIAG_MDM_BUF_SIZE);
		if (err)
			goto fail;

		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "queueing a read buf %pK, ch: %s\n",
			 buf, mhi_info->name);
		spin_lock_irqsave(&read_ch->lock, flags);
		err = mhi_queue_xfer(read_ch->hdl, buf, DIAG_MDM_BUF_SIZE,
				     mhi_flags);
		spin_unlock_irqrestore(&read_ch->lock, flags);
		if (err) {
			pr_err_ratelimited("diag: Unable to read from MHI channel %s, err: %d\n",
					   mhi_info->name, err);
			goto fail;
		}
	} while (buf);

	return;
fail:
	mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf, DIAG_MDM_BUF_SIZE);
	queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
}

static int mhi_queue_read(int id)
{
	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
				   id);
		return -EINVAL;
	}
	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
	return 0;
}

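/*
 * Send one buffer to the remote device. The buffer stays tracked in the
 * write table until the MHI_CB_XFER callback reports the write complete,
 * at which point mhi_notifier() drops the entry and informs the bridge
 * via diag_remote_dev_write_done().
 */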
static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	enum MHI_FLAGS mhi_flags = MHI_EOT;
	unsigned long flags;
	struct diag_mhi_ch_t *ch = NULL;

	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
				   id);
		return -EINVAL;
	}

	if (!buf || len <= 0) {
		pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
		       __func__, id, buf, len);
		return -EINVAL;
	}

	if (!diag_mhi[id].enabled) {
		pr_err_ratelimited("diag: In %s, MHI channel %s is not enabled\n",
				   __func__, diag_mhi[id].name);
		return -EIO;
	}

	ch = &diag_mhi[id].write_ch;
	if (!(atomic_read(&(ch->opened)))) {
		pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
				   __func__, diag_mhi[id].name);
		return -EIO;
	}

	err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
	if (err)
		goto fail;

	spin_lock_irqsave(&ch->lock, flags);
	err = mhi_queue_xfer(ch->hdl, buf, len, mhi_flags);
	spin_unlock_irqrestore(&ch->lock, flags);
	if (err) {
		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %s, len %d, err: %d\n",
				   __func__, diag_mhi[id].name, len, err);
		mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
		goto fail;
	}

	return 0;
fail:
	return err;
}

static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
{
	if (id < 0 || id >= NUM_MHI_DEV) {
		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
				   id);
		return -EINVAL;
	}

	if (!buf)
		return -EINVAL;

	mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_READ_CH, buf, len);
	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
	return 0;
}

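/*
 * Single callback registered for every channel. The per-channel context
 * passed at registration time (see SET_CH_CTXT) identifies which device
 * and which direction the notification is for; MHI_CB_XFER then means
 * read-complete on a read channel and write-complete on a write channel.
 */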
static void mhi_notifier(struct mhi_cb_info *cb_info)
{
	int index;
	int type;
	int err = 0;
	struct mhi_result *result = NULL;
	struct diag_mhi_ch_t *ch = NULL;
	void *buf = NULL;

	if (!cb_info)
		return;

	result = cb_info->result;
	if (!result) {
		pr_err_ratelimited("diag: failed to obtain mhi result from callback\n");
		return;
	}

	index = GET_INFO_INDEX((uintptr_t)cb_info->result->user_data);
	if (index < 0 || index >= NUM_MHI_DEV) {
		pr_err_ratelimited("diag: In %s, invalid MHI index %d\n",
				   __func__, index);
		return;
	}

	type = GET_CH_TYPE((uintptr_t)cb_info->result->user_data);
	switch (type) {
	case TYPE_MHI_READ_CH:
		ch = &diag_mhi[index].read_ch;
		break;
	case TYPE_MHI_WRITE_CH:
		ch = &diag_mhi[index].write_ch;
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid channel type %d\n",
				   __func__, type);
		return;
	}

	switch (cb_info->cb_reason) {
	case MHI_CB_MHI_ENABLED:
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "received mhi enabled notification port: %d ch: %d\n",
			 index, ch->type);
		err = mhi_ch_open(ch);
		if (err)
			break;
		if (ch->type == TYPE_MHI_READ_CH) {
			diag_mhi[index].num_read = mhi_get_free_desc(ch->hdl);
			if (diag_mhi[index].num_read <= 0) {
				pr_err("diag: In %s, invalid number of descriptors %d\n",
				       __func__, diag_mhi[index].num_read);
				break;
			}
		}
		__mhi_open(&diag_mhi[index], CHANNELS_OPENED);
		queue_work(diag_mhi[index].mhi_wq,
			   &(diag_mhi[index].open_work));
		break;
	case MHI_CB_MHI_DISABLED:
		DIAG_LOG(DIAG_DEBUG_BRIDGE,
			 "received mhi disabled notification port: %d ch: %d\n",
			 index, ch->type);
		atomic_set(&(ch->opened), 0);
		__mhi_close(&diag_mhi[index], CHANNELS_CLOSED);
		break;
	case MHI_CB_XFER:
		/*
		 * If the channel is a read channel, this is a read-complete
		 * notification; on a write channel it is a write-complete
		 * notification.
		 */
		if (type == TYPE_MHI_READ_CH) {
			if (!atomic_read(&(diag_mhi[index].read_ch.opened)))
				break;

			queue_work(diag_mhi[index].mhi_wq,
				   &(diag_mhi[index].read_done_work));
			break;
		}
		buf = result->buf_addr;
		if (!buf) {
			pr_err_ratelimited("diag: In %s, unable to de-serialize the data\n",
					   __func__);
			break;
		}
		mhi_buf_tbl_remove(&diag_mhi[index], TYPE_MHI_WRITE_CH, buf,
				   result->bytes_xferd);
		diag_remote_dev_write_done(diag_mhi[index].dev_id, buf,
					   result->bytes_xferd,
					   diag_mhi[index].id);
		break;
	default:
		pr_err("diag: In %s, invalid cb reason 0x%x\n", __func__,
		       cb_info->cb_reason);
		break;
	}
}

static struct diag_remote_dev_ops diag_mhi_fwd_ops = {
	.open = mhi_open,
	.close = mhi_close,
	.queue_read = mhi_queue_read,
	.write = mhi_write,
	.fwd_complete = mhi_fwd_complete,
};

static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
{
	int ctxt = 0;

	if (!ch)
		return -EIO;
	if (id < 0 || id >= NUM_MHI_DEV)
		return -EINVAL;
	spin_lock_init(&ch->lock);
	atomic_set(&(ch->opened), 0);
	ctxt = SET_CH_CTXT(id, ch->type);
	ch->client_info.mhi_client_cb = mhi_notifier;
	return mhi_register_channel(&ch->hdl, ch->chan, 0, &ch->client_info,
				    (void *)(uintptr_t)ctxt);
}

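/*
 * Set up every entry in diag_mhi: init locks and work items, create the
 * per-device workqueue and mempool, register with the diagfwd bridge, and
 * then register both MHI channels. Any failure unwinds everything through
 * diag_mhi_exit().
 */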
671{
672 int i;
673 int err = 0;
674 struct diag_mhi_info *mhi_info = NULL;
675 char wq_name[DIAG_MHI_NAME_SZ + DIAG_MHI_STRING_SZ];
676
677 for (i = 0; i < NUM_MHI_DEV; i++) {
678 mhi_info = &diag_mhi[i];
679 spin_lock_init(&mhi_info->lock);
680 INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
681 INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
682 INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
683 INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
684 strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ);
685 strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name));
686 diagmem_init(driver, mhi_info->mempool);
687 mhi_info->mempool_init = 1;
688 mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
689 if (!mhi_info->mhi_wq)
690 goto fail;
691 err = diagfwd_bridge_register(mhi_info->dev_id, mhi_info->id,
692 &diag_mhi_fwd_ops);
693 if (err) {
694 pr_err("diag: Unable to register MHI channel %d with bridge, err: %d\n",
695 i, err);
696 goto fail;
697 }
698 err = diag_mhi_register_ch(mhi_info->id, &mhi_info->read_ch);
699 if (err) {
700 pr_err("diag: Unable to register MHI read channel for %d, err: %d\n",
701 i, err);
702 goto fail;
703 }
704 err = diag_mhi_register_ch(mhi_info->id, &mhi_info->write_ch);
705 if (err) {
706 pr_err("diag: Unable to register MHI write channel for %d, err: %d\n",
707 i, err);
708 goto fail;
709 }
710 DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initailzed\n", i);
711 }
712
713 return 0;
714fail:
715 diag_mhi_exit();
716 return -ENOMEM;
717}
718
void diag_mhi_exit(void)
{
	int i;
	struct diag_mhi_info *mhi_info = NULL;

	for (i = 0; i < NUM_MHI_DEV; i++) {
		mhi_info = &diag_mhi[i];
		/*
		 * Close first: __mhi_close() flushes the workqueue, so the
		 * workqueue must still exist when the channels go down.
		 */
		mhi_close(mhi_info->id);
		if (mhi_info->mhi_wq)
			destroy_workqueue(mhi_info->mhi_wq);
		if (mhi_info->mempool_init)
			diagmem_exit(driver, mhi_info->mempool);
	}
}