/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/diagchar.h>
#include <linux/delay.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include "diag_pcie.h"
#include "diag_mux.h"
#include "diagmem.h"
#include "diag_ipc_logging.h"

#define DIAG_LEGACY "DIAG_PCIE"

struct diag_pcie_info diag_pcie[NUM_DIAG_PCIE_DEV] = {
	{
		.id = DIAG_PCIE_LOCAL,
		.name = DIAG_LEGACY,
		.enabled = {0},
		.mempool = POOL_TYPE_MUX_APPS,
		.ops = NULL,
		.wq = NULL,
		.read_cnt = 0,
		.write_cnt = 0,
		.in_chan_attr = {
			.max_pkt_size = DIAG_MAX_PKT_SZ,
			.nr_trbs = 1,
			.read_buffer = NULL,
		},
		.out_chan_attr = {
			.max_pkt_size = DIAG_MAX_PCIE_PKT_SZ,
		},
		.in_chan = MHI_CLIENT_DIAG_OUT,
		.out_chan = MHI_CLIENT_DIAG_IN,
	}
};

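/*
 * diag_pcie_event_notifier() - MHI channel event callback. On a
 * MHI_DEV_TRE_AVAILABLE event for a device's inbound channel, queue that
 * device's read worker.
 */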
static void diag_pcie_event_notifier(struct mhi_dev_client_cb_reason *reason)
{
	int i;
	struct diag_pcie_info *pcie_info = NULL;

	for (i = 0; i < NUM_DIAG_PCIE_DEV; i++) {
		pcie_info = &diag_pcie[i];
		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
			if (reason->ch_id == pcie_info->in_chan) {
				queue_work(pcie_info->wq,
					   &pcie_info->read_work);
				break;
			}
	}
}

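/*
 * diag_pcie_read_work_fn() - Workqueue handler that performs a synchronous
 * read on the inbound MHI channel into the channel's read buffer and hands
 * the data to the mux layer through the read_done callback.
 */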
void diag_pcie_read_work_fn(struct work_struct *work)
{
	struct mhi_req ureq;
	struct diag_pcie_info *pcie_info = container_of(work,
						struct diag_pcie_info,
						read_work);
	int bytes_avail = 0;

	if (!pcie_info || !atomic_read(&pcie_info->enabled) ||
	    !atomic_read(&pcie_info->diag_state))
		return;

	ureq.chan = pcie_info->in_chan;
	ureq.client = pcie_info->in_handle;
	ureq.mode = IPA_DMA_SYNC;
	ureq.buf = pcie_info->in_chan_attr.read_buffer;
	ureq.len = pcie_info->in_chan_attr.read_buffer_size;
	ureq.transfer_len = 0;
	bytes_avail = mhi_dev_read_channel(&ureq);
	if (bytes_avail < 0)
		return;
	DIAG_LOG(DIAG_DEBUG_MUX, "read total bytes %d from chan:%d",
		 bytes_avail, pcie_info->in_chan);
	pcie_info->read_cnt++;

	if (pcie_info->ops && pcie_info->ops->read_done)
		pcie_info->ops->read_done(pcie_info->in_chan_attr.read_buffer,
					  ureq.transfer_len, pcie_info->ctxt);
}

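/*
 * diag_pcie_buf_tbl_remove() - Drop one reference on the tracking entry for
 * buf and free the entry once its reference count reaches zero. Callers hold
 * the channel's write_lock.
 */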
static void diag_pcie_buf_tbl_remove(struct diag_pcie_info *pcie_info,
				     unsigned char *buf)
{
	struct diag_pcie_buf_tbl_t *temp = NULL;
	struct diag_pcie_buf_tbl_t *entry = NULL;

	list_for_each_entry_safe(entry, temp, &pcie_info->buf_tbl, track) {
		if (entry->buf == buf) {
			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
			atomic_dec(&entry->ref_count);
			/*
			 * Remove reference from the table if it is the
			 * only instance of the buffer
			 */
			if (atomic_read(&entry->ref_count) == 0) {
				list_del(&entry->track);
				kfree(entry);
				entry = NULL;
			}
			break;
		}
	}
}

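/*
 * diag_pcie_buf_tbl_get() - Look up the tracking entry for buf, decrement
 * its reference count, and return the entry, or NULL if buf is not in the
 * table. Used by the write completion callback with write_lock held.
 */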
static struct diag_pcie_buf_tbl_t *diag_pcie_buf_tbl_get(
				struct diag_pcie_info *pcie_info,
				unsigned char *buf)
{
	struct diag_pcie_buf_tbl_t *temp = NULL;
	struct diag_pcie_buf_tbl_t *entry = NULL;

	list_for_each_entry_safe(entry, temp, &pcie_info->buf_tbl, track) {
		if (entry->buf == buf) {
			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
			atomic_dec(&entry->ref_count);
			return entry;
		}
	}

	return NULL;
}

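/*
 * diag_pcie_write_complete_cb() - MHI write completion callback. Drops the
 * buffer's table reference; on the last reference it removes the entry and
 * notifies the mux layer via write_done. The request and its context are
 * freed in either case.
 */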
void diag_pcie_write_complete_cb(void *req)
{
	struct diag_pcie_context *ctxt = NULL;
	struct diag_pcie_info *ch;
	struct diag_pcie_buf_tbl_t *entry = NULL;
	struct mhi_req *ureq = req;
	unsigned long flags;

	if (!ureq)
		return;
	ctxt = (struct diag_pcie_context *)ureq->context;
	if (!ctxt)
		return;
	ch = ctxt->ch;
	if (!ch)
		return;
	spin_lock_irqsave(&ch->write_lock, flags);
	ch->write_cnt++;
	entry = diag_pcie_buf_tbl_get(ch, ctxt->buf);
	if (!entry) {
		pr_err_ratelimited("diag: In %s, unable to find entry %pK in the table\n",
				   __func__, ctxt->buf);
		spin_unlock_irqrestore(&ch->write_lock, flags);
		return;
	}
	if (atomic_read(&entry->ref_count) != 0) {
		DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
			 atomic_read(&entry->ref_count));
		diag_ws_on_copy_complete(DIAG_WS_MUX);
		spin_unlock_irqrestore(&ch->write_lock, flags);
		diagmem_free(driver, req, ch->mempool);
		kfree(ctxt);
		ctxt = NULL;
		return;
	}
	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %pK\n",
		 ctxt->buf);
	list_del(&entry->track);
	kfree(entry);
	entry = NULL;
	if (ch->ops && ch->ops->write_done)
		ch->ops->write_done(ureq->buf, ureq->len,
				    ctxt->buf_ctxt, DIAG_PCIE_MODE);
	spin_unlock_irqrestore(&ch->write_lock, flags);
	diagmem_free(driver, req, ch->mempool);
	kfree(ctxt);
	ctxt = NULL;
}

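/*
 * diag_pcie_buf_tbl_add() - Track an outstanding write buffer. If buf is
 * already in the table its reference count is incremented, otherwise a new
 * entry with a reference count of one is appended. Returns 0 on success or
 * -ENOMEM if the entry cannot be allocated.
 */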
static int diag_pcie_buf_tbl_add(struct diag_pcie_info *pcie_info,
				 unsigned char *buf, uint32_t len, int ctxt)
{
	struct diag_pcie_buf_tbl_t *temp = NULL;
	struct diag_pcie_buf_tbl_t *entry = NULL;

	list_for_each_entry_safe(entry, temp, &pcie_info->buf_tbl, track) {
		if (entry->buf == buf) {
			atomic_inc(&entry->ref_count);
			return 0;
		}
	}

	/* New buffer, not found in the list */
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->buf = buf;
	entry->ctxt = ctxt;
	entry->len = len;
	atomic_set(&entry->ref_count, 1);
	INIT_LIST_HEAD(&entry->track);
	list_add_tail(&entry->track, &pcie_info->buf_tbl);

	return 0;
}

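/*
 * diag_pcie_write_ext() - Write a buffer that exceeds the outbound channel's
 * maximum packet size by splitting it into max_pkt_size chunks, each sent as
 * a separate asynchronous MHI write request. The buffer is tracked once per
 * chunk so the completion callback can tell partial from final completions.
 */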
static int diag_pcie_write_ext(struct diag_pcie_info *pcie_info,
			       unsigned char *buf, int len, int ctxt)
{
	int write_len = 0;
	int bytes_remaining = len;
	int offset = 0;
	struct mhi_req *req;
	struct diag_pcie_context *context;
	int bytes_to_write;
	unsigned long flags;

	if (!pcie_info || !buf || len <= 0) {
		pr_err_ratelimited("diag: In %s, pcie_info: %pK buf: %pK, len: %d\n",
				   __func__, pcie_info, buf, len);
		return -EINVAL;
	}

	while (bytes_remaining > 0) {
		req = diagmem_alloc(driver, sizeof(struct mhi_req),
				    pcie_info->mempool);
		if (!req) {
			pr_err_ratelimited("diag: In %s, cannot retrieve pcie write ptrs for pcie channel %s\n",
					   __func__, pcie_info->name);
			return -ENOMEM;
		}

		write_len = (bytes_remaining >
			     pcie_info->out_chan_attr.max_pkt_size) ?
			     pcie_info->out_chan_attr.max_pkt_size :
			     bytes_remaining;
		req->client = pcie_info->out_handle;
		context = kzalloc(sizeof(*context), GFP_KERNEL);
		if (!context) {
			diagmem_free(driver, req, pcie_info->mempool);
			return -ENOMEM;
		}

		context->ch = pcie_info;
		context->buf_ctxt = ctxt;
		context->buf = buf;
		req->context = context;
		req->buf = buf + offset;
		req->len = write_len;
		req->chan = pcie_info->out_chan;
		req->mode = IPA_DMA_ASYNC;
		req->client_cb = diag_pcie_write_complete_cb;
		req->snd_cmpl = 1;
		if (!pcie_info->out_handle ||
		    !atomic_read(&pcie_info->enabled) ||
		    !atomic_read(&pcie_info->diag_state)) {
			pr_debug_ratelimited("diag: pcie ch %s is not opened\n",
					     pcie_info->name);
			kfree(req->context);
			diagmem_free(driver, req, pcie_info->mempool);
			return -ENODEV;
		}
		spin_lock_irqsave(&pcie_info->write_lock, flags);
		if (diag_pcie_buf_tbl_add(pcie_info, buf, len, ctxt)) {
			kfree(req->context);
			diagmem_free(driver, req, pcie_info->mempool);
			spin_unlock_irqrestore(&pcie_info->write_lock, flags);
			return -ENOMEM;
		}
		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
		diag_ws_on_read(DIAG_WS_MUX, len);
		bytes_to_write = mhi_dev_write_channel(req);
		diag_ws_on_copy(DIAG_WS_MUX);
		if (bytes_to_write != write_len) {
			pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d, write_len: %d\n",
					   __func__, pcie_info->name,
					   bytes_to_write, write_len);
			DIAG_LOG(DIAG_DEBUG_MUX,
				 "ERR! unable to write to pcie, err: %d, write_len: %d\n",
				 bytes_to_write, write_len);
			diag_ws_on_copy_fail(DIAG_WS_MUX);
			spin_lock_irqsave(&pcie_info->write_lock, flags);
			diag_pcie_buf_tbl_remove(pcie_info, buf);
			kfree(req->context);
			diagmem_free(driver, req, pcie_info->mempool);
			spin_unlock_irqrestore(&pcie_info->write_lock, flags);
			return -EINVAL;
		}
		offset += write_len;
		bytes_remaining -= write_len;
		DIAG_LOG(DIAG_DEBUG_MUX,
			 "bytes_remaining: %d write_len: %d, len: %d\n",
			 bytes_remaining, write_len, len);
	}
	DIAG_LOG(DIAG_DEBUG_MUX, "done writing!");

	return 0;
}

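/*
 * diag_pcie_write() - Queue an asynchronous write of buf on the outbound MHI
 * channel of device id. Buffers larger than the channel's maximum packet
 * size are handed off to diag_pcie_write_ext(). Returns 0 once the request
 * is queued; completion is reported through diag_pcie_write_complete_cb().
 */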
int diag_pcie_write(int id, unsigned char *buf, int len, int ctxt)
{
	struct mhi_req *req;
	struct diag_pcie_context *context;
	int bytes_to_write;
	struct diag_pcie_info *pcie_info;
	unsigned long flags;

	pcie_info = &diag_pcie[id];

	if (len > pcie_info->out_chan_attr.max_pkt_size) {
		DIAG_LOG(DIAG_DEBUG_MUX, "len: %d, max_size: %zu\n",
			 len, pcie_info->out_chan_attr.max_pkt_size);
		return diag_pcie_write_ext(pcie_info, buf, len, ctxt);
	}
	req = (struct mhi_req *)diagmem_alloc(driver, sizeof(struct mhi_req),
					      pcie_info->mempool);
	if (!req) {
		pr_err_ratelimited("diag: In %s, cannot retrieve pcie write ptrs for pcie channel %s\n",
				   __func__, pcie_info->name);
		return -ENOMEM;
	}
	req->client = pcie_info->out_handle;
	context = kzalloc(sizeof(struct diag_pcie_context), GFP_KERNEL);
	if (!context) {
		diagmem_free(driver, req, pcie_info->mempool);
		return -ENOMEM;
	}

	context->ch = &diag_pcie[id];
	context->buf_ctxt = ctxt;
	context->buf = buf;
	req->context = context;
	req->buf = buf;
	req->len = len;
	req->chan = pcie_info->out_chan;
	req->mode = IPA_DMA_ASYNC;
	req->client_cb = diag_pcie_write_complete_cb;
	req->snd_cmpl = 1;
	if (!pcie_info->out_handle || !atomic_read(&pcie_info->enabled) ||
	    !atomic_read(&pcie_info->diag_state)) {
		pr_debug_ratelimited("diag: pcie ch %s is not opened\n",
				     pcie_info->name);
		kfree(req->context);
		diagmem_free(driver, req, pcie_info->mempool);
		return -ENODEV;
	}
	spin_lock_irqsave(&pcie_info->write_lock, flags);
	if (diag_pcie_buf_tbl_add(pcie_info, buf, len, ctxt)) {
		DIAG_LOG(DIAG_DEBUG_MUX,
			 "ERR! unable to add buf %pK to table\n", buf);
		kfree(req->context);
		diagmem_free(driver, req, pcie_info->mempool);
		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
		return -ENOMEM;
	}
	spin_unlock_irqrestore(&pcie_info->write_lock, flags);
	diag_ws_on_read(DIAG_WS_MUX, len);
	bytes_to_write = mhi_dev_write_channel(req);
	diag_ws_on_copy(DIAG_WS_MUX);
	if (bytes_to_write != len) {
		pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d len: %d\n",
				   __func__, pcie_info->name, bytes_to_write, len);
		diag_ws_on_copy_fail(DIAG_WS_MUX);
		DIAG_LOG(DIAG_DEBUG_MUX,
			 "ERR! unable to write to pcie, err: %d len: %d\n",
			 bytes_to_write, len);
		spin_lock_irqsave(&pcie_info->write_lock, flags);
		diag_pcie_buf_tbl_remove(pcie_info, buf);
		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
		kfree(req->context);
		diagmem_free(driver, req, pcie_info->mempool);
		return -EINVAL;
	}
	DIAG_LOG(DIAG_DEBUG_MUX, "wrote packet to pcie chan:%d, len:%d",
		 pcie_info->out_chan, len);

	return 0;
}

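/*
 * pcie_init_read_chan() - Allocate the read buffer used for the inbound
 * channel, sized to the channel's maximum packet size, and record it in the
 * channel attributes.
 */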
static int pcie_init_read_chan(struct diag_pcie_info *ptr,
			       enum mhi_client_channel chan)
{
	int rc = 0;
	size_t buf_size;
	void *data_loc;

	if (ptr == NULL) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Bad Input data, quitting\n");
		return -EINVAL;
	}

	buf_size = ptr->in_chan_attr.max_pkt_size;
	data_loc = kzalloc(buf_size, GFP_KERNEL);
	if (!data_loc)
		return -ENOMEM;

	kmemleak_not_leak(data_loc);
	ptr->in_chan_attr.read_buffer = data_loc;
	ptr->in_chan_attr.read_buffer_size = buf_size;

	return rc;
}

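/*
 * diag_pcie_client_cb() - MHI state-change callback registered for the
 * outbound channel. Queues the open worker on MHI_STATE_CONNECTED and the
 * close worker on MHI_STATE_DISCONNECTED, skipping events that would not
 * change the current enabled state.
 */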
void diag_pcie_client_cb(struct mhi_dev_client_cb_data *cb_data)
{
	struct diag_pcie_info *pcie_info = NULL;

	if (!cb_data)
		return;

	pcie_info = cb_data->user_data;
	if (!pcie_info)
		return;

	switch (cb_data->ctrl_info) {
	case MHI_STATE_CONNECTED:
		if (cb_data->channel == pcie_info->out_chan) {
			DIAG_LOG(DIAG_DEBUG_MUX,
				 " Received connect event from MHI for %d",
				 pcie_info->out_chan);
			if (atomic_read(&pcie_info->enabled))
				return;
			queue_work(pcie_info->wq, &pcie_info->open_work);
		}
		break;
	case MHI_STATE_DISCONNECTED:
		if (cb_data->channel == pcie_info->out_chan) {
			DIAG_LOG(DIAG_DEBUG_MUX,
				 " Received disconnect event from MHI for %d",
				 pcie_info->out_chan);
			if (!atomic_read(&pcie_info->enabled))
				return;
			queue_work(pcie_info->wq, &pcie_info->close_work);
		}
		break;
	default:
		break;
	}
}

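/*
 * diag_register_pcie_channels() - Set the TRE-available event notifier,
 * allocate the inbound read buffer, and register diag_pcie_client_cb() with
 * MHI for state-change notifications on the outbound channel.
 */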
static int diag_register_pcie_channels(struct diag_pcie_info *pcie_info)
{
	int rc = 0;

	if (!pcie_info)
		return -EIO;

	pcie_info->event_notifier = diag_pcie_event_notifier;

	DIAG_LOG(DIAG_DEBUG_MUX,
		 "Initializing inbound chan %d.\n",
		 pcie_info->in_chan);
	rc = pcie_init_read_chan(pcie_info, pcie_info->in_chan);
	if (rc < 0) {
		DIAG_LOG(DIAG_DEBUG_MUX,
			 "Failed to init inbound 0x%x, ret 0x%x\n",
			 pcie_info->in_chan, rc);
		return rc;
	}
	/* Register for state change notifications from MHI */
	rc = mhi_register_state_cb(diag_pcie_client_cb, pcie_info,
				   pcie_info->out_chan);
	if (rc < 0)
		return rc;

	return 0;
}

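/*
 * diag_pcie_connect() - Notify the mux layer that the PCIe channel is open
 * (when diag_state is set) and queue an initial read on the inbound channel.
 */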
static void diag_pcie_connect(struct diag_pcie_info *ch)
{
	if (!ch || !atomic_read(&ch->enabled))
		return;

	if (ch->ops && ch->ops->open)
		if (atomic_read(&ch->diag_state))
			ch->ops->open(ch->ctxt, DIAG_PCIE_MODE);

	/* As soon as we open the channel, queue a read */
	queue_work(ch->wq, &(ch->read_work));
}

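/*
 * diag_pcie_open_work_fn() - Workqueue handler run on an MHI connect event.
 * Opens the outbound (write) and inbound (read) MHI channels, marks the
 * device enabled, and performs the Diag-side connect.
 */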
void diag_pcie_open_work_fn(struct work_struct *work)
{
	int rc = 0;
	struct diag_pcie_info *pcie_info = container_of(work,
						struct diag_pcie_info,
						open_work);

	if (!pcie_info || atomic_read(&pcie_info->enabled))
		return;

	mutex_lock(&pcie_info->out_chan_lock);
	mutex_lock(&pcie_info->in_chan_lock);
	/* Open write channel */
	rc = mhi_dev_open_channel(pcie_info->out_chan,
				  &pcie_info->out_handle,
				  pcie_info->event_notifier);
	if (rc < 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "Failed to open chan %d, ret %d\n",
			 pcie_info->out_chan, rc);
		goto handle_not_rdy_err;
	}
	DIAG_LOG(DIAG_DEBUG_MUX, "opened write channel %d",
		 pcie_info->out_chan);

	/* Open read channel */
	rc = mhi_dev_open_channel(pcie_info->in_chan,
				  &pcie_info->in_handle,
				  pcie_info->event_notifier);
	if (rc < 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "Failed to open chan %d, ret 0x%x\n",
			 pcie_info->in_chan, rc);
		goto handle_in_err;
	}
	DIAG_LOG(DIAG_DEBUG_MUX, "opened read channel %d", pcie_info->in_chan);
	mutex_unlock(&pcie_info->in_chan_lock);
	mutex_unlock(&pcie_info->out_chan_lock);
	atomic_set(&pcie_info->enabled, 1);
	atomic_set(&pcie_info->diag_state, 1);
	diag_pcie_connect(pcie_info);
	return;
handle_in_err:
	mhi_dev_close_channel(pcie_info->out_handle);
	atomic_set(&pcie_info->enabled, 0);
handle_not_rdy_err:
	mutex_unlock(&pcie_info->in_chan_lock);
	mutex_unlock(&pcie_info->out_chan_lock);
}

/*
 * This function performs PCIe connect operations for Diag synchronously. It
 * does not translate to an actual PCIe connect; it is used when Diag
 * switches logging to PCIe mode and needs to mimic a PCIe connection.
 */
void diag_pcie_connect_all(void)
{
	int i = 0;
	struct diag_pcie_info *pcie_info = NULL;

	for (i = 0; i < NUM_DIAG_PCIE_DEV; i++) {
		pcie_info = &diag_pcie[i];
		if (!atomic_read(&pcie_info->enabled))
			continue;
		atomic_set(&pcie_info->diag_state, 1);
		diag_pcie_connect(pcie_info);
	}
}

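/*
 * diag_pcie_disconnect() - Perform the Diag-side close for a PCIe channel:
 * clear the diag masks when the channel is disabled, PCIe is connected, and
 * mask clearing is enabled, then invoke the mux layer's close callback.
 */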
static void diag_pcie_disconnect(struct diag_pcie_info *ch)
{
	if (!ch)
		return;

	if (!atomic_read(&ch->enabled) &&
	    driver->pcie_connected && diag_mask_param())
		diag_clear_masks(0);

	if (ch && ch->ops && ch->ops->close)
		ch->ops->close(ch->ctxt, DIAG_PCIE_MODE);
}

/*
 * This function performs PCIe disconnect operations for Diag synchronously.
 * It does not translate to an actual PCIe disconnect; it is used when Diag
 * switches logging away from PCIe mode and needs to mimic a PCIe disconnect.
 */
void diag_pcie_disconnect_all(void)
{
	int i = 0;
	struct diag_pcie_info *pcie_info = NULL;

	for (i = 0; i < NUM_DIAG_PCIE_DEV; i++) {
		pcie_info = &diag_pcie[i];
		if (!atomic_read(&pcie_info->enabled))
			continue;
		atomic_set(&pcie_info->diag_state, 0);
		diag_pcie_disconnect(pcie_info);
	}
}

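/*
 * diag_pcie_close_work_fn() - Workqueue handler run on an MHI disconnect
 * event. Closes both MHI channels, clears the enabled flag, and performs the
 * Diag-side disconnect.
 */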
void diag_pcie_close_work_fn(struct work_struct *work)
{
	int rc = 0;
	struct diag_pcie_info *pcie_info = container_of(work,
						struct diag_pcie_info,
						close_work);

	if (!pcie_info || !atomic_read(&pcie_info->enabled))
		return;
	mutex_lock(&pcie_info->out_chan_lock);
	mutex_lock(&pcie_info->in_chan_lock);
	rc = mhi_dev_close_channel(pcie_info->in_handle);
	DIAG_LOG(DIAG_DEBUG_MUX, " closed in bound channel %d",
		 pcie_info->in_chan);
	rc = mhi_dev_close_channel(pcie_info->out_handle);
	DIAG_LOG(DIAG_DEBUG_MUX, " closed out bound channel %d",
		 pcie_info->out_chan);
	mutex_unlock(&pcie_info->in_chan_lock);
	mutex_unlock(&pcie_info->out_chan_lock);
	atomic_set(&pcie_info->enabled, 0);
	diag_pcie_disconnect(pcie_info);
}

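/*
 * diag_pcie_register() - Register a PCIe mux device: initialize its state,
 * buffer table, locks, and workqueue, set up its memory pool, and register
 * its MHI channels. Returns 0 on success or a negative error code.
 */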
int diag_pcie_register(int id, int ctxt, struct diag_mux_ops *ops)
{
	struct diag_pcie_info *ch = NULL;
	int rc = 0;
	unsigned char wq_name[DIAG_PCIE_NAME_SZ + DIAG_PCIE_STRING_SZ];

	if (id < 0 || id >= NUM_DIAG_PCIE_DEV) {
		pr_err("diag: Unable to register with PCIE, id: %d\n", id);
		return -EIO;
	}

	if (!ops) {
		pr_err("diag: Invalid operations for PCIE\n");
		return -EIO;
	}

	ch = &diag_pcie[id];
	ch->ops = ops;
	ch->ctxt = ctxt;
	atomic_set(&ch->diag_state, 0);
	atomic_set(&ch->enabled, 0);
	INIT_LIST_HEAD(&ch->buf_tbl);
	spin_lock_init(&ch->write_lock);
	INIT_WORK(&(ch->read_work), diag_pcie_read_work_fn);
	INIT_WORK(&(ch->open_work), diag_pcie_open_work_fn);
	INIT_WORK(&(ch->close_work), diag_pcie_close_work_fn);
	strlcpy(wq_name, "DIAG_PCIE_", sizeof(wq_name));
	strlcat(wq_name, ch->name, sizeof(wq_name));
	ch->wq = create_singlethread_workqueue(wq_name);
	if (!ch->wq)
		return -ENOMEM;
	diagmem_init(driver, ch->mempool);
	mutex_init(&ch->in_chan_lock);
	mutex_init(&ch->out_chan_lock);
	rc = diag_register_pcie_channels(ch);
	if (rc < 0) {
		if (ch->wq)
			destroy_workqueue(ch->wq);
		kfree(ch->in_chan_attr.read_buffer);
		return rc;
	}
	return 0;
}