Nitesh Guptadd7321f2020-02-18 15:06:12 +05301/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/of.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/delay.h>
20#include <linux/mutex.h>
21#include <linux/types.h>
22#include <linux/io.h>
23#include <linux/of_irq.h>
24#include <linux/interrupt.h>
25#include <linux/workqueue.h>
26#include <linux/completion.h>
27#include <linux/platform_device.h>
28#include <linux/msm_ep_pcie.h>
29#include <linux/ipa_mhi.h>
30#include <linux/vmalloc.h>
Nitesh Guptadd7321f2020-02-18 15:06:12 +053031#include <soc/qcom/boot_stats.h>
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080032
33#include "mhi.h"
34#include "mhi_hwio.h"
35#include "mhi_sm.h"
36
37/* Wait time on the device for Host to set M0 state */
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080038#define MHI_DEV_M0_MAX_CNT 30
39/* Wait time before suspend/resume is complete */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -080040#define MHI_SUSPEND_MIN 100
41#define MHI_SUSPEND_TIMEOUT 600
Siddartha Mohanadoss1a1d8f02018-04-02 19:52:35 -070042#define MHI_WAKEUP_TIMEOUT_CNT 20
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080043#define MHI_MASK_CH_EV_LEN 32
44#define MHI_RING_CMD_ID 0
45#define MHI_RING_PRIMARY_EVT_ID 1
46#define MHI_1K_SIZE 0x1000
/* Per the updated spec, HW accelerator event rings span NER - 2 (start) to NER - 1 (end) */
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080048#define MHI_HW_ACC_EVT_RING_END 1
49
50#define MHI_HOST_REGION_NUM 2
51
52#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1
53#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2
54
55#define HOST_ADDR(lsb, msb) ((lsb) | ((uint64_t)(msb) << 32))
56#define HOST_ADDR_LSB(addr) (addr & 0xFFFFFFFF)
57#define HOST_ADDR_MSB(addr) ((addr >> 32) & 0xFFFFFFFF)
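/*
 * Illustrative sketch, not part of the driver logic: the macros above
 * combine and split 64-bit host addresses that are stored as two
 * 32-bit halves. For an assumed address of 0x0000001234ABCDEF:
 *
 *	HOST_ADDR(0x34ABCDEF, 0x12)       == 0x0000001234ABCDEF
 *	HOST_ADDR_LSB(0x0000001234ABCDEF) == 0x34ABCDEF
 *	HOST_ADDR_MSB(0x0000001234ABCDEF) == 0x00000012
 */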
58
59#define MHI_IPC_LOG_PAGES (100)
Siddartha Mohanadosse0954332018-01-15 14:03:03 -080060#define MHI_REGLEN 0x100
61#define MHI_INIT 0
62#define MHI_REINIT 1
63
64#define TR_RING_ELEMENT_SZ sizeof(struct mhi_dev_transfer_ring_element)
65#define RING_ELEMENT_TYPE_SZ sizeof(union mhi_dev_ring_element_type)
66
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +053067#define MHI_DEV_CH_CLOSE_TIMEOUT_MIN 5000
68#define MHI_DEV_CH_CLOSE_TIMEOUT_MAX 5100
69#define MHI_DEV_CH_CLOSE_TIMEOUT_COUNT 30
70
Siva Kumar Akkireddi4e388212019-02-05 19:53:19 +053071uint32_t bhi_imgtxdb;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080072enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
73enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
74void *mhi_ipc_log;
75
76static struct mhi_dev *mhi_ctx;
77static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
78 unsigned long data);
79static void mhi_ring_init_cb(void *user_data);
Siddartha Mohanadossba314f22018-03-19 15:43:50 -070080static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -080081static int mhi_deinit(struct mhi_dev *mhi);
82static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify);
83static int mhi_dev_pcie_notify_event;
84static void mhi_dev_transfer_completion_cb(void *mreq);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +053085static int mhi_dev_alloc_evt_buf_evt_req(struct mhi_dev *mhi,
86 struct mhi_dev_channel *ch, struct mhi_dev_ring *evt_ring);
Siddartha Mohanadossba314f22018-03-19 15:43:50 -070087static struct mhi_dev_uevent_info channel_state_info[MHI_MAX_CHANNELS];
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080088
Siddartha Mohanadosse0954332018-01-15 14:03:03 -080089/*
 * mhi_dev_ring_cache_completion_cb() - Callback invoked by the IPA
 * driver when caching of the ring elements is done
92 *
93 * @req : ring cache request
94 */
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +053095static void mhi_dev_ring_cache_completion_cb(void *req)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080096{
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +053097 struct ring_cache_req *ring_req = req;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -080098
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +053099 if (ring_req)
100 complete(ring_req->done);
101 else
102 mhi_log(MHI_MSG_ERROR, "ring cache req is NULL\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800103}
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800104
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800105void mhi_dev_read_from_host(struct mhi_dev *mhi, struct mhi_addr *transfer)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800106{
107 int rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800108 uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
109 struct ring_cache_req ring_req;
110
111 DECLARE_COMPLETION(done);
112
113 ring_req.done = &done;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800114
115 if (!mhi) {
116 pr_err("invalid MHI ctx\n");
117 return;
118 }
119
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800120 if (mhi->config_iatu) {
121 offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa;
122 /* Mapping the translated physical address on the device */
123 host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset;
124 } else {
125 host_addr_pa = transfer->host_pa | bit_40;
126 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800127
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800128 mhi_log(MHI_MSG_VERBOSE,
129 "device 0x%x <<-- host 0x%llx, size %d\n",
130 transfer->phy_addr, host_addr_pa,
131 (int) transfer->size);
132 rc = ipa_dma_async_memcpy((u64)transfer->phy_addr, host_addr_pa,
133 (int)transfer->size,
134 mhi_dev_ring_cache_completion_cb, &ring_req);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800135 if (rc)
136 pr_err("error while reading from host:%d\n", rc);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800137
138 wait_for_completion(&done);
139}
140EXPORT_SYMBOL(mhi_dev_read_from_host);
141
142void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *transfer,
143 struct event_req *ereq, enum mhi_dev_transfer_type tr_type)
144{
145 int rc = 0;
146 uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
147 dma_addr_t dma;
148
149 if (!mhi) {
150 pr_err("invalid MHI ctx\n");
151 return;
152 }
153 if (mhi->config_iatu) {
154 offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa;
155 /* Mapping the translated physical address on the device */
156 host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset;
157 } else {
158 host_addr_pa = transfer->host_pa | bit_40;
159 }
160
161 mhi_log(MHI_MSG_VERBOSE,
162 "device 0x%llx --> host 0x%llx, size %d\n",
163 (uint64_t) mhi->cache_dma_handle, host_addr_pa,
164 (int) transfer->size);
165 if (tr_type == MHI_DEV_DMA_ASYNC) {
166 dma = dma_map_single(&mhi->pdev->dev,
167 transfer->virt_addr, transfer->size,
168 DMA_TO_DEVICE);
169 if (ereq->event_type == SEND_EVENT_BUFFER) {
170 ereq->dma = dma;
171 ereq->dma_len = transfer->size;
172 } else if (ereq->event_type == SEND_EVENT_RD_OFFSET) {
173 ereq->event_rd_dma = dma;
174 }
175 rc = ipa_dma_async_memcpy(host_addr_pa, (uint64_t) dma,
176 (int)transfer->size,
177 ereq->client_cb, ereq);
178 if (rc)
179 pr_err("error while writing to host:%d\n", rc);
180 } else if (tr_type == MHI_DEV_DMA_SYNC) {
181 /* Copy the device content to a local device
182 * physical address.
183 */
184 memcpy(mhi->dma_cache, transfer->virt_addr,
185 transfer->size);
186 rc = ipa_dma_sync_memcpy(host_addr_pa,
187 (u64) mhi->cache_dma_handle,
188 (int) transfer->size);
189 if (rc)
190 pr_err("error while writing to host:%d\n", rc);
191 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800192}
193EXPORT_SYMBOL(mhi_dev_write_to_host);
194
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530195/*
196 * mhi_dev_event_buf_completion_cb() - CB function called by IPA driver
197 * when transfer completion event buffer copy to host is done.
198 *
199 * @req - event_req structure
200 */
201static void mhi_dev_event_buf_completion_cb(void *req)
202{
203 struct event_req *ereq = req;
204
205 if (ereq)
206 dma_unmap_single(&mhi_ctx->pdev->dev, ereq->dma,
207 ereq->dma_len, DMA_TO_DEVICE);
208 else
209 mhi_log(MHI_MSG_ERROR, "event req is null\n");
210}
211
212/*
213 * mhi_dev_event_rd_offset_completion_cb() - CB function called by IPA driver
214 * when event ring rd_offset transfer is done.
215 *
216 * @req - event_req structure
217 */
218static void mhi_dev_event_rd_offset_completion_cb(void *req)
219{
220 union mhi_dev_ring_ctx *ctx;
221 int rc;
222 struct event_req *ereq = req;
223 struct mhi_dev_channel *ch = ereq->context;
224 struct mhi_dev *mhi = ch->ring->mhi_dev;
225 unsigned long flags;
226
227 if (ereq->event_rd_dma)
228 dma_unmap_single(&mhi_ctx->pdev->dev, ereq->event_rd_dma,
229 sizeof(uint64_t), DMA_TO_DEVICE);
230 /* rp update in host memory should be flushed before sending an MSI */
231 wmb();
232 ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring];
233 rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
234 if (rc)
		pr_err("%s: error sending MSI\n", __func__);
236
237 /* Add back the flushed events space to the event buffer */
238 ch->evt_buf_wp = ereq->start + ereq->num_events;
239 if (ch->evt_buf_wp == ch->evt_buf_size)
240 ch->evt_buf_wp = 0;
241 /* Return the event req to the list */
242 spin_lock_irqsave(&mhi->lock, flags);
243 if (ch->curr_ereq == NULL)
244 ch->curr_ereq = ereq;
245 else
246 list_add_tail(&ereq->list, &ch->event_req_buffers);
247 spin_unlock_irqrestore(&mhi->lock, flags);
248}
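/*
 * Illustrative sketch with assumed values, not part of the driver
 * logic: in mhi_dev_event_rd_offset_completion_cb() above, if
 * ch->evt_buf_size is 64 and the flushed request started at offset 60
 * with num_events == 4, evt_buf_wp becomes 60 + 4 == 64, which equals
 * evt_buf_size and is therefore wrapped back to 0.
 */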
249
250static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
251 struct event_req *ereq, uint32_t evt_len)
252{
253 int rc = 0;
254 uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
255 struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
256 union mhi_dev_ring_ctx *ctx;
257 struct mhi_addr transfer_addr;
258 struct mhi_dev_channel *ch;
259
260 if (!ereq) {
261 pr_err("%s(): invalid event req\n", __func__);
262 return -EINVAL;
263 }
264
265 if (evnt_ring_idx > mhi->cfg.event_rings) {
266 pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
267 return -EINVAL;
268 }
269
270 if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
271 ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
272 rc = mhi_ring_start(ring, ctx, mhi);
273 if (rc) {
274 mhi_log(MHI_MSG_ERROR,
275 "error starting event ring %d\n", evnt_ring);
276 return rc;
277 }
278 }
279 ch = ereq->context;
280 /* Check the limits of the buffer to be flushed */
281 if (ereq->tr_events < ch->tr_events ||
282 (ereq->tr_events + ereq->num_events) >
283 (ch->tr_events + ch->evt_buf_size)) {
284 pr_err("%s: Invalid completion event buffer!\n", __func__);
285 mhi_log(MHI_MSG_ERROR,
286 "Invalid cmpl evt buf - start %pK, end %pK\n",
287 ereq->tr_events, ereq->tr_events + ereq->num_events);
288 return -EINVAL;
289 }
290
291 mhi_log(MHI_MSG_VERBOSE, "Flushing %d cmpl events of ch %d\n",
292 ereq->num_events, ch->ch_id);
293 /* add the events */
294 ereq->client_cb = mhi_dev_event_buf_completion_cb;
295 ereq->event_type = SEND_EVENT_BUFFER;
296 rc = mhi_dev_add_element(ring, ereq->tr_events, ereq, evt_len);
297 if (rc) {
298 pr_err("%s(): error in adding element rc %d\n", __func__, rc);
299 return rc;
300 }
301 ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
302 sizeof(union mhi_dev_ring_element_type)) +
303 ring->ring_ctx->generic.rbase;
304
305 mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
306 ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);
307
308 if (mhi->use_ipa) {
309 transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
310 sizeof(struct mhi_dev_ev_ctx) *
311 evnt_ring) + (size_t)&ring->ring_ctx->ev.rp -
312 (size_t)ring->ring_ctx;
313 /*
314 * As ev_ctx_cache memory is dma_alloc_coherent, dma_map_single
315 * should not be called. Pass physical address to write to host.
316 */
317 transfer_addr.phy_addr = (mhi->ev_ctx_cache_dma_handle +
318 sizeof(struct mhi_dev_ev_ctx) * evnt_ring) +
319 (size_t)&ring->ring_ctx->ev.rp -
320 (size_t)ring->ring_ctx;
321 } else {
322 transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
323 sizeof(struct mhi_dev_ev_ctx) *
324 evnt_ring) + (size_t)&ring->ring_ctx->ev.rp -
325 (size_t)ring->ring_ctx;
326 }
327
328 transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
329 transfer_addr.size = sizeof(uint64_t);
330 ereq->event_type = SEND_EVENT_RD_OFFSET;
331 ereq->client_cb = mhi_dev_event_rd_offset_completion_cb;
332 ereq->event_ring = evnt_ring;
333 mhi_dev_write_to_host(mhi, &transfer_addr, ereq, MHI_DEV_DMA_ASYNC);
334 return rc;
335}
336
337static int mhi_dev_flush_transfer_completion_events(struct mhi_dev *mhi,
338 struct mhi_dev_channel *ch)
339{
340 int rc = 0;
341 unsigned long flags;
342 struct event_req *flush_ereq;
343
344 /*
345 * Channel got closed with transfers pending
346 * Do not send completion events to host
347 */
348 if (ch->state == MHI_DEV_CH_CLOSED) {
349 mhi_log(MHI_MSG_DBG, "Ch %d closed with %d writes pending\n",
350 ch->ch_id, ch->pend_wr_count + 1);
351 return -ENODEV;
352 }
353
354 do {
355 spin_lock_irqsave(&mhi->lock, flags);
356 if (list_empty(&ch->flush_event_req_buffers)) {
357 spin_unlock_irqrestore(&mhi->lock, flags);
358 break;
359 }
360 flush_ereq = container_of(ch->flush_event_req_buffers.next,
361 struct event_req, list);
362 list_del_init(&flush_ereq->list);
363 spin_unlock_irqrestore(&mhi->lock, flags);
364
365 mhi_log(MHI_MSG_DBG, "Flush called for ch %d\n", ch->ch_id);
366 rc = mhi_dev_send_multiple_tr_events(mhi,
367 mhi->ch_ctx_cache[ch->ch_id].err_indx,
368 flush_ereq,
369 (flush_ereq->num_events *
370 sizeof(union mhi_dev_ring_element_type)));
371 if (rc) {
372 mhi_log(MHI_MSG_ERROR, "failed to send compl evts\n");
373 break;
374 }
375 } while (true);
376
377 return rc;
378}
379
380static bool mhi_dev_is_full_compl_evt_buf(struct mhi_dev_channel *ch)
381{
382 if (((ch->evt_buf_rp + 1) % ch->evt_buf_size) == ch->evt_buf_wp)
383 return true;
384
385 return false;
386}
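/*
 * Illustrative sketch with assumed values: the check above treats the
 * completion event buffer as a circular buffer that keeps one slot
 * unused to distinguish full from empty. With evt_buf_size == 8,
 * evt_buf_rp == 6 and evt_buf_wp == 7, (6 + 1) % 8 == 7 equals the
 * write pointer, so the buffer is reported full.
 */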
387
388static void mhi_dev_rollback_compl_evt(struct mhi_dev_channel *ch)
389{
390 if (ch->evt_buf_rp)
391 ch->evt_buf_rp--;
392 else
393 ch->evt_buf_rp = ch->evt_buf_size - 1;
394}
395
396/*
397 * mhi_dev_queue_transfer_completion() - Queues a transfer completion
398 * event to the event buffer (where events are stored until they get
399 * flushed to host). Also determines when the completion events are
400 * to be flushed (sent) to host.
401 *
402 * @req - event_req structure
403 * @flush - Set to true when completion events are to be flushed.
404 */
405
406static int mhi_dev_queue_transfer_completion(struct mhi_req *mreq, bool *flush)
407{
408 union mhi_dev_ring_element_type *compl_ev;
409 struct mhi_dev_channel *ch = mreq->client->channel;
410 unsigned long flags;
411
412 if (mhi_dev_is_full_compl_evt_buf(ch) || ch->curr_ereq == NULL) {
413 mhi_log(MHI_MSG_VERBOSE, "Ran out of %s\n",
414 (ch->curr_ereq ? "compl evts" : "ereqs"));
415 return -EBUSY;
416 }
417
418 if (mreq->el->tre.ieot) {
419 compl_ev = ch->tr_events + ch->evt_buf_rp;
420 compl_ev->evt_tr_comp.chid = ch->ch_id;
421 compl_ev->evt_tr_comp.type =
422 MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
423 compl_ev->evt_tr_comp.len = mreq->transfer_len;
424 compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
425 compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
426 mreq->rd_offset * TR_RING_ELEMENT_SZ;
427 ch->evt_buf_rp++;
428 if (ch->evt_buf_rp == ch->evt_buf_size)
429 ch->evt_buf_rp = 0;
430 ch->curr_ereq->num_events++;
431 /*
432 * It is not necessary to flush when we need to wrap-around, if
433 * we do have free space in the buffer upon wrap-around.
434 * But when we really need to flush, we need a separate dma op
435 * anyway for the current chunk (from flush_start to the
436 * physical buffer end) since the buffer is circular. So we
437 * might as well flush on wrap-around.
438 * Also, we flush when we hit the threshold as well. The flush
439 * threshold is based on the channel's event ring size.
440 *
441 * In summary, completion event buffer flush is done if
442 * * Client requests it (snd_cmpl was set to 1) OR
443 * * Physical end of the event buffer is reached OR
444 * * Flush threshold is reached for the current ereq
445 *
446 * When events are to be flushed, the current ereq is moved to
447 * the flush list, and the flush param is set to true for the
448 * second and third cases above. The actual flush of the events
449 * is done in the write_to_host API (for the write path) or
450 * in the transfer completion callback (for the read path).
451 */
452 if (ch->evt_buf_rp == 0 ||
453 ch->curr_ereq->num_events >=
454 MHI_CMPL_EVT_FLUSH_THRSHLD(ch->evt_buf_size)
455 || mreq->snd_cmpl) {
456 if (flush)
457 *flush = true;
458
459 if (!mreq->snd_cmpl)
460 mreq->snd_cmpl = 1;
461
462 ch->curr_ereq->tr_events = ch->tr_events +
463 ch->curr_ereq->start;
464 ch->curr_ereq->context = ch;
465
466 /* Move current event req to flush list*/
467 spin_lock_irqsave(&mhi_ctx->lock, flags);
468 list_add_tail(&ch->curr_ereq->list,
469 &ch->flush_event_req_buffers);
470
471 if (!list_empty(&ch->event_req_buffers)) {
472 ch->curr_ereq =
473 container_of(ch->event_req_buffers.next,
474 struct event_req, list);
475 list_del_init(&ch->curr_ereq->list);
476 ch->curr_ereq->num_events = 0;
477 ch->curr_ereq->start = ch->evt_buf_rp;
478 } else {
479 pr_err("%s evt req buffers empty\n", __func__);
480 mhi_log(MHI_MSG_ERROR,
481 "evt req buffers empty\n");
482 ch->curr_ereq = NULL;
483 }
484 spin_unlock_irqrestore(&mhi_ctx->lock, flags);
485 }
486 return 0;
487 }
488
489 mhi_log(MHI_MSG_ERROR, "ieot is not valid\n");
490 return -EINVAL;
491}
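/*
 * Illustrative sketch with assumed numbers for the flush decision in
 * mhi_dev_queue_transfer_completion() above: with a 128-entry event
 * buffer and MHI_CMPL_EVT_FLUSH_THRSHLD() assumed to evaluate to 100,
 * the current ereq is moved to the flush list when the event buffer
 * write position wraps to 0, when 100 events have been queued on the
 * ereq, or when the client set snd_cmpl on the request.
 */
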
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800492int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800493 struct mhi_dev *mhi, struct mhi_req *mreq)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800494{
495 int rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800496 uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
497 struct mhi_dev_ring *ring = NULL;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530498 struct mhi_dev_channel *ch;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800499
500 if (!mhi || !dev || !host_pa || !mreq) {
		pr_err("%s(): Invalid parameters\n", __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800502 return -EINVAL;
503 }
504
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800505 if (mhi->config_iatu) {
506 offset = (uint64_t)host_pa - mhi->data_base.host_pa;
507 /* Mapping the translated physical address on the device */
508 host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset;
509 } else {
510 host_addr_pa = host_pa | bit_40;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800511 }
512
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800513 mhi_log(MHI_MSG_VERBOSE, "device 0x%llx <-- host 0x%llx, size %d\n",
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800514 (uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800515
516 if (mreq->mode == IPA_DMA_SYNC) {
517 rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle,
518 host_addr_pa, (int) len);
519 if (rc) {
520 pr_err("error while reading chan using sync:%d\n", rc);
521 return rc;
522 }
523 memcpy(dev, mhi->read_handle, len);
524 } else if (mreq->mode == IPA_DMA_ASYNC) {
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530525 ch = mreq->client->channel;
526 ring = ch->ring;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800527 mreq->dma = dma_map_single(&mhi->pdev->dev, dev, len,
528 DMA_FROM_DEVICE);
529 mhi_dev_ring_inc_index(ring, ring->rd_offset);
530
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530531 if (ring->rd_offset == ring->wr_offset) {
532 mhi_log(MHI_MSG_VERBOSE,
533 "Setting snd_cmpl to 1 for ch %d\n", ch->ch_id);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800534 mreq->snd_cmpl = 1;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530535 }
536
537 /* Queue the completion event for the current transfer */
538 rc = mhi_dev_queue_transfer_completion(mreq, NULL);
539 if (rc) {
540 mhi_log(MHI_MSG_ERROR,
541 "Failed to queue completion for ch %d, rc %d\n",
542 ch->ch_id, rc);
543 return rc;
544 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800545 rc = ipa_dma_async_memcpy(mreq->dma, host_addr_pa,
546 (int) len, mhi_dev_transfer_completion_cb,
547 mreq);
548 if (rc) {
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530549 mhi_log(MHI_MSG_ERROR,
550 "DMA read error %d for ch %d\n", rc, ch->ch_id);
551 /* Roll back the completion event that we wrote above */
552 mhi_dev_rollback_compl_evt(ch);
553 /* Unmap the buffer */
554 dma_unmap_single(&mhi_ctx->pdev->dev, mreq->dma,
555 len, DMA_FROM_DEVICE);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800556 return rc;
557 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800558 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800559 return rc;
560}
561EXPORT_SYMBOL(mhi_transfer_host_to_device);
562
563int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800564 struct mhi_dev *mhi, struct mhi_req *req)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800565{
566 int rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800567 uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
568 struct mhi_dev_ring *ring = NULL;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530569 bool flush = false;
570 struct mhi_dev_channel *ch;
571 u32 snd_cmpl;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800572
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800573 if (!mhi || !dev || !req || !host_addr) {
		pr_err("%s: Invalid parameters\n", __func__);
575 return -EINVAL;
576 }
577
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800578 if (mhi->config_iatu) {
579 offset = (uint64_t)host_addr - mhi->data_base.host_pa;
580 /* Mapping the translated physical address on the device */
581 host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset;
582 } else {
583 host_addr_pa = host_addr | bit_40;
584 }
585 mhi_log(MHI_MSG_VERBOSE, "device 0x%llx ---> host 0x%llx, size %d\n",
586 (uint64_t) mhi->write_dma_handle,
587 host_addr_pa, (int) len);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800588
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800589 if (req->mode == IPA_DMA_SYNC) {
590 memcpy(mhi->write_handle, dev, len);
591 rc = ipa_dma_sync_memcpy(host_addr_pa,
592 (u64) mhi->write_dma_handle, (int) len);
593 } else if (req->mode == IPA_DMA_ASYNC) {
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530594 ch = req->client->channel;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800595 req->dma = dma_map_single(&mhi->pdev->dev, req->buf,
596 req->len, DMA_TO_DEVICE);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530597
598 ring = ch->ring;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800599 mhi_dev_ring_inc_index(ring, ring->rd_offset);
600 if (ring->rd_offset == ring->wr_offset)
601 req->snd_cmpl = 1;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530602 snd_cmpl = req->snd_cmpl;
603
604 /* Queue the completion event for the current transfer */
605 rc = mhi_dev_queue_transfer_completion(req, &flush);
606 if (rc) {
607 pr_err("Failed to queue completion: %d\n", rc);
608 return rc;
609 }
610
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800611 rc = ipa_dma_async_memcpy(host_addr_pa,
612 (uint64_t) req->dma, (int) len,
613 mhi_dev_transfer_completion_cb, req);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530614 if (rc) {
615 mhi_log(MHI_MSG_ERROR, "Error sending data to host\n");
616 /* Roll back the completion event that we wrote above */
617 mhi_dev_rollback_compl_evt(ch);
618 /* Unmap the buffer */
619 dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
620 req->len, DMA_TO_DEVICE);
621 return rc;
622 }
623 if (snd_cmpl || flush) {
624 rc = mhi_dev_flush_transfer_completion_events(mhi, ch);
625 if (rc) {
626 mhi_log(MHI_MSG_ERROR,
627 "Failed to flush write completions to host\n");
628 return rc;
629 }
630 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800631 }
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +0530632 return 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800633}
634EXPORT_SYMBOL(mhi_transfer_device_to_host);
635
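/*
 * Note: despite its name, mhi_dev_is_list_empty() returns 0 when both
 * the event ring list and the process ring list are empty, and 1 when
 * either list still has entries.
 */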
636int mhi_dev_is_list_empty(void)
637{
638
639 if (list_empty(&mhi_ctx->event_ring_list) &&
640 list_empty(&mhi_ctx->process_ring_list))
641 return 0;
642 else
643 return 1;
644}
645EXPORT_SYMBOL(mhi_dev_is_list_empty);
646
647static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi,
648 struct ep_pcie_db_config *erdb_cfg)
649{
650 switch (mhi->cfg.event_rings) {
651 case NUM_CHANNELS:
652 erdb_cfg->base = HW_CHANNEL_BASE;
653 erdb_cfg->end = HW_CHANNEL_END;
654 break;
655 default:
656 erdb_cfg->base = mhi->cfg.event_rings -
Rama Krishna Phani A1b235ff2019-06-25 15:13:48 +0530657 (mhi->cfg.hw_event_rings);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800658 erdb_cfg->end = mhi->cfg.event_rings -
659 MHI_HW_ACC_EVT_RING_END;
660 break;
661 }
662}
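/*
 * Illustrative sketch with assumed values for mhi_dev_get_erdb_db_cfg()
 * above: with cfg.event_rings == 16 and cfg.hw_event_rings == 2, the
 * default case yields erdb_cfg->base == 14 and erdb_cfg->end == 15,
 * i.e. the last two event rings are the ones whose doorbells the
 * caller routes to the IPA uC mailbox.
 */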
663
664int mhi_pcie_config_db_routing(struct mhi_dev *mhi)
665{
666 int rc = 0;
667 struct ep_pcie_db_config chdb_cfg, erdb_cfg;
668
669 if (!mhi) {
670 pr_err("Invalid MHI context\n");
671 return -EINVAL;
672 }
673
674 /* Configure Doorbell routing */
675 chdb_cfg.base = HW_CHANNEL_BASE;
676 chdb_cfg.end = HW_CHANNEL_END;
677 chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb;
678
679 mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
680
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800681 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800682 "Event rings 0x%x => er_base 0x%x, er_end %d\n",
683 mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
684 erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
685 ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg);
686
687 return rc;
688}
689EXPORT_SYMBOL(mhi_pcie_config_db_routing);
690
691static int mhi_hwc_init(struct mhi_dev *mhi)
692{
693 int rc = 0;
694 struct ep_pcie_msi_config cfg;
695 struct ipa_mhi_init_params ipa_init_params;
696 struct ep_pcie_db_config erdb_cfg;
697
698 /* Call IPA HW_ACC Init with MSI Address and db routing info */
699 rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
700 if (rc) {
		pr_err("Error retrieving PCIe MSI config\n");
702 return rc;
703 }
704
705 rc = mhi_pcie_config_db_routing(mhi);
706 if (rc) {
707 pr_err("Error configuring DB routing\n");
708 return rc;
709 }
710
711 mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800712 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800713 "Event rings 0x%x => er_base 0x%x, er_end %d\n",
714 mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
715
716 erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
717 memset(&ipa_init_params, 0, sizeof(ipa_init_params));
718 ipa_init_params.msi.addr_hi = cfg.upper;
719 ipa_init_params.msi.addr_low = cfg.lower;
720 ipa_init_params.msi.data = cfg.data;
721 ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
722 ipa_init_params.first_er_idx = erdb_cfg.base;
723 ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800724
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800725 if (mhi_ctx->config_iatu)
726 ipa_init_params.mmio_addr =
727 ((uint32_t) mhi_ctx->mmio_base_pa_addr) + MHI_REGLEN;
728 else
729 ipa_init_params.mmio_addr =
730 ((uint32_t) mhi_ctx->mmio_base_pa_addr);
731
732 if (!mhi_ctx->config_iatu)
733 ipa_init_params.assert_bit40 = true;
734
735 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800736 "MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n",
737 ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data);
738 ipa_init_params.notify = mhi_hwc_cb;
739 ipa_init_params.priv = mhi;
740
741 rc = ipa_mhi_init(&ipa_init_params);
742 if (rc) {
743 pr_err("Error initializing IPA\n");
744 return rc;
745 }
746
747 return rc;
748}
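/*
 * Illustrative sketch with an assumed value: if ep_pcie_get_msi_config()
 * above reports cfg.msg_num == 5, mhi_hwc_init() hands the IPA uC an
 * MSI mask of ((1 << 5) - 1) == 0x1F.
 */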
749
750static int mhi_hwc_start(struct mhi_dev *mhi)
751{
752 int rc = 0;
753 struct ipa_mhi_start_params ipa_start_params;
754
755 memset(&ipa_start_params, 0, sizeof(ipa_start_params));
756
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800757 if (mhi->config_iatu) {
758 ipa_start_params.host_ctrl_addr = mhi->ctrl_base.device_pa;
759 ipa_start_params.host_data_addr = mhi->data_base.device_pa;
760 } else {
761 ipa_start_params.channel_context_array_addr =
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800762 mhi->ch_ctx_shadow.host_pa;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800763 ipa_start_params.event_context_array_addr =
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800764 mhi->ev_ctx_shadow.host_pa;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800765 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800766
767 rc = ipa_mhi_start(&ipa_start_params);
768 if (rc)
769 pr_err("Error starting IPA (rc = 0x%X)\n", rc);
770
771 return rc;
772}
773
774static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
775 unsigned long data)
776{
777 int rc = 0;
778
779 switch (event) {
780 case IPA_MHI_EVENT_READY:
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800781 mhi_log(MHI_MSG_INFO,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800782 "HW Channel uC is ready event=0x%X\n", event);
783 rc = mhi_hwc_start(mhi_ctx);
784 if (rc) {
785 pr_err("hwc_init start failed with %d\n", rc);
786 return;
787 }
788
789 rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx);
790 if (rc) {
791 pr_err("Failed to enable channel db\n");
792 return;
793 }
794
795 rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx);
796 if (rc) {
797 pr_err("Failed to enable control interrupt\n");
798 return;
799 }
800
801 rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx);
802
803 if (rc) {
804 pr_err("Failed to enable command db\n");
805 return;
806 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800807
Siddartha Mohanadossba314f22018-03-19 15:43:50 -0700808 mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);
Nitesh Guptadd7321f2020-02-18 15:06:12 +0530809 mhi_log(MHI_MSG_CRITICAL, "Device in M0 State\n");
810 place_marker("MHI - Device in M0 State\n");
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800811
Siva Kumar Akkireddidd7c6ed2018-09-07 15:04:17 +0530812 if (!mhi_ctx->mhi_int)
813 ep_pcie_mask_irq_event(mhi_ctx->phandle,
814 EP_PCIE_INT_EVT_MHI_A7, true);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800815 break;
816 case IPA_MHI_EVENT_DATA_AVAILABLE:
817 rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
818 if (rc) {
819 pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc);
820 return;
821 }
822 break;
823 default:
824 pr_err("HW Channel uC unknown event 0x%X\n", event);
825 break;
826 }
827}
828
829static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid,
830 enum mhi_dev_ring_element_type_id type)
831{
832 int rc = 0;
833 struct ipa_mhi_connect_params connect_params;
834
835 memset(&connect_params, 0, sizeof(connect_params));
836
837 switch (type) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800838 case MHI_DEV_RING_EL_RESET:
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800839 case MHI_DEV_RING_EL_STOP:
Nitesh Gupta67c5c612020-04-30 23:28:38 +0530840 if ((chid-HW_CHANNEL_BASE) > NUM_HW_CHANNELS) {
841 pr_err("Invalid Channel ID = 0x%X\n", chid);
842 return -EINVAL;
843 }
844
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800845 rc = ipa_mhi_disconnect_pipe(
846 mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
847 if (rc)
848 pr_err("Stopping HW Channel%d failed 0x%X\n",
849 chid, rc);
850 break;
851 case MHI_DEV_RING_EL_START:
852 connect_params.channel_id = chid;
853 connect_params.sys.skip_ep_cfg = true;
Rama Krishna Phani A1b235ff2019-06-25 15:13:48 +0530854
855 switch (chid) {
856 case MHI_CLIENT_ADPL_IN:
Siva Kumar Akkireddi859701c2018-10-03 17:35:30 +0530857 connect_params.sys.client = IPA_CLIENT_MHI_DPL_CONS;
Rama Krishna Phani A1b235ff2019-06-25 15:13:48 +0530858 break;
859 case MHI_CLIENT_IP_HW_0_OUT:
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800860 connect_params.sys.client = IPA_CLIENT_MHI_PROD;
Rama Krishna Phani A1b235ff2019-06-25 15:13:48 +0530861 break;
862 case MHI_CLIENT_IP_HW_0_IN:
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800863 connect_params.sys.client = IPA_CLIENT_MHI_CONS;
Rama Krishna Phani A1b235ff2019-06-25 15:13:48 +0530864 break;
865 case MHI_CLIENT_IP_HW_1_OUT:
866 connect_params.sys.client = IPA_CLIENT_MHI2_PROD;
867 break;
868 case MHI_CLIENT_IP_HW_1_IN:
869 connect_params.sys.client = IPA_CLIENT_MHI2_CONS;
870 break;
871 default:
872 pr_err("Invalid channel = 0x%X\n", chid);
873 return -EINVAL;
874 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800875
Nitesh Gupta67c5c612020-04-30 23:28:38 +0530876 if ((chid-HW_CHANNEL_BASE) > NUM_HW_CHANNELS) {
877 pr_err("Invalid Channel = 0x%X\n", chid);
878 return -EINVAL;
879 }
880
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800881 rc = ipa_mhi_connect_pipe(&connect_params,
882 &mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
883 if (rc)
Rama Krishna Phani A1b235ff2019-06-25 15:13:48 +0530884 pr_err("HW Channel%d start failed : %d\n",
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800885 chid, rc);
886 break;
887 case MHI_DEV_RING_EL_INVALID:
888 default:
889 pr_err("Invalid Ring Element type = 0x%X\n", type);
890 break;
891 }
892
893 return rc;
894}
895
896static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev,
897 uint32_t *int_value)
898{
899 int rc = 0;
900
901 rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value);
902 if (rc) {
903 pr_err("Failed to read A7 status\n");
904 return;
905 }
906
	rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value);
908 if (rc) {
909 pr_err("Failed to clear A7 status\n");
910 return;
911 }
912}
913
914static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id)
915{
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800916 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800917
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800918 if (mhi->use_ipa) {
919 data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800920 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800921 data_transfer.phy_addr = mhi->ch_ctx_cache_dma_handle +
922 sizeof(struct mhi_dev_ch_ctx) * ch_id;
923 }
924
925 data_transfer.size = sizeof(struct mhi_dev_ch_ctx);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800926 /* Fetch the channel ctx (*dst, *src, size) */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800927 mhi_dev_read_from_host(mhi, &data_transfer);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800928}
929
930int mhi_dev_syserr(struct mhi_dev *mhi)
931{
932
933 if (!mhi) {
934 pr_err("%s: Invalid MHI ctx\n", __func__);
935 return -EINVAL;
936 }
937
938 mhi_dev_dump_mmio(mhi);
939 pr_err("MHI dev sys error\n");
940
941 return 0;
942}
943EXPORT_SYMBOL(mhi_dev_syserr);
944
945int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
946 union mhi_dev_ring_element_type *el)
947{
948 int rc = 0;
949 uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
950 struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
951 union mhi_dev_ring_ctx *ctx;
952 struct ep_pcie_msi_config cfg;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800953 struct mhi_addr transfer_addr;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800954
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800955 rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
956 if (rc) {
		pr_err("Error retrieving PCIe MSI config\n");
958 return rc;
959 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800960
961 if (evnt_ring_idx > mhi->cfg.event_rings) {
962 pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
963 return -EINVAL;
964 }
965
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800966 ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800967 if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800968 rc = mhi_ring_start(ring, ctx, mhi);
969 if (rc) {
970 mhi_log(MHI_MSG_ERROR,
971 "error starting event ring %d\n", evnt_ring);
972 return rc;
973 }
974 }
975
976 mutex_lock(&mhi->mhi_event_lock);
977 /* add the ring element */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800978 mhi_dev_add_element(ring, el, NULL, 0);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800979
980 ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
981 sizeof(union mhi_dev_ring_element_type)) +
982 ring->ring_ctx->generic.rbase;
983
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800984 mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800985 ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);
986
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800987 if (mhi->use_ipa)
988 transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800989 sizeof(struct mhi_dev_ev_ctx) *
990 evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
991 (uint32_t) ring->ring_ctx;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800992 else
993 transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
994 sizeof(struct mhi_dev_ev_ctx) *
995 evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
996 (uint32_t) ring->ring_ctx;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800997
Siddartha Mohanadosse0954332018-01-15 14:03:03 -0800998 transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
999 transfer_addr.size = sizeof(uint64_t);
1000
1001 mhi_dev_write_to_host(mhi, &transfer_addr, NULL, MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001002 /*
1003 * rp update in host memory should be flushed
1004 * before sending a MSI to the host
1005 */
1006 wmb();
1007
1008 mutex_unlock(&mhi->mhi_event_lock);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001009 mhi_log(MHI_MSG_VERBOSE, "event sent:\n");
1010 mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
1011 mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x\n", el->evt_tr_comp.len);
1012 mhi_log(MHI_MSG_VERBOSE, "evnt code :0x%x\n", el->evt_tr_comp.code);
1013 mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n", el->evt_tr_comp.type);
1014 mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
1015 rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
1016 if (rc) {
1017 pr_err("%s: error sending msi\n", __func__);
1018 return rc;
1019 }
1020 return rc;
1021}
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001022
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001023static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
1024 uint32_t rd_ofst, uint32_t len,
1025 enum mhi_dev_cmd_completion_code code)
1026{
1027 int rc = 0;
1028 union mhi_dev_ring_element_type compl_event;
1029 struct mhi_dev *mhi = ch->ring->mhi_dev;
1030
1031 compl_event.evt_tr_comp.chid = ch->ch_id;
1032 compl_event.evt_tr_comp.type =
1033 MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
1034 compl_event.evt_tr_comp.len = len;
1035 compl_event.evt_tr_comp.code = code;
1036 compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
1037 rd_ofst * sizeof(struct mhi_dev_transfer_ring_element);
1038
1039 rc = mhi_dev_send_event(mhi,
1040 mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event);
1041
1042 return rc;
1043}
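/*
 * Illustrative sketch with assumed values for
 * mhi_dev_send_completion_event() above: for a transfer ring whose
 * rbase is 0x1000, assuming 16-byte ring elements, a completion for
 * the element at read offset 3 carries evt_tr_comp.ptr ==
 * 0x1000 + 3 * 16 == 0x1030, pointing the host at the exact TRE that
 * completed.
 */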
1044
1045int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
1046 enum mhi_dev_state state)
1047{
1048 union mhi_dev_ring_element_type event;
1049 int rc = 0;
1050
1051 event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG;
1052 event.evt_state_change.mhistate = state;
1053
1054 rc = mhi_dev_send_event(mhi, 0, &event);
1055 if (rc) {
1056 pr_err("Sending state change event failed\n");
1057 return rc;
1058 }
1059
1060 return rc;
1061}
1062EXPORT_SYMBOL(mhi_dev_send_state_change_event);
1063
1064int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env)
1065{
1066 union mhi_dev_ring_element_type event;
1067 int rc = 0;
1068
1069 event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY;
1070 event.evt_ee_state.execenv = exec_env;
1071
1072 rc = mhi_dev_send_event(mhi, 0, &event);
1073 if (rc) {
1074 pr_err("Sending EE change event failed\n");
1075 return rc;
1076 }
1077
1078 return rc;
1079}
1080EXPORT_SYMBOL(mhi_dev_send_ee_event);
1081
Siva Kumar Akkireddi6a719ae2018-06-11 22:44:14 +05301082static void mhi_dev_trigger_cb(enum mhi_client_channel ch_id)
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001083{
1084 struct mhi_dev_ready_cb_info *info;
Siddartha Mohanadossf0aab7a2018-03-19 15:19:22 -07001085 enum mhi_ctrl_info state_data;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001086
Siddartha Mohanadoss32b02492020-03-19 22:12:00 -07001087 /* Currently no clients register for HW channel notify */
1088 if (ch_id >= MHI_MAX_SOFTWARE_CHANNELS)
1089 return;
1090
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001091 list_for_each_entry(info, &mhi_ctx->client_cb_list, list)
Siva Kumar Akkireddi6a719ae2018-06-11 22:44:14 +05301092 if (info->cb && info->cb_data.channel == ch_id) {
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07001093 mhi_ctrl_state_info(info->cb_data.channel, &state_data);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001094 info->cb_data.ctrl_info = state_data;
1095 info->cb(&info->cb_data);
1096 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001097}
1098
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001099int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi)
1100{
1101 int rc = 0;
1102
1103 /*
	 * Expected usage: when there is HW ACC traffic, the IPA uC notifies
	 * Q6 -> IPA A7 -> MHI core -> MHI SM
1106 */
1107 rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
1108 if (rc) {
1109 pr_err("error sending SM event\n");
1110 return rc;
1111 }
1112
1113 return rc;
1114}
1115EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup);
1116
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001117static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi,
1118 enum mhi_dev_cmd_completion_code code)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001119{
1120 int rc = 0;
1121 union mhi_dev_ring_element_type event;
1122
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001123 if (code > MHI_CMD_COMPL_CODE_RES) {
1124 mhi_log(MHI_MSG_ERROR,
1125 "Invalid cmd compl code: %d\n", code);
1126 return -EINVAL;
1127 }
1128
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001129 /* send the command completion event to the host */
1130 event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase
1131 + (mhi->ring[MHI_RING_CMD_ID].rd_offset *
1132 (sizeof(union mhi_dev_ring_element_type)));
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001133 mhi_log(MHI_MSG_VERBOSE, "evt cmd comp ptr :%d\n",
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001134 (uint32_t) event.evt_cmd_comp.ptr);
1135 event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001136 event.evt_cmd_comp.code = code;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001137 rc = mhi_dev_send_event(mhi, 0, &event);
1138 if (rc)
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001139 mhi_log(MHI_MSG_ERROR, "Send completion failed\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001140
1141 return rc;
1142}
1143
1144static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
1145 struct mhi_dev *mhi)
1146{
1147 int rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001148 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001149
1150 if (ring->rd_offset != ring->wr_offset &&
1151 mhi->ch_ctx_cache[ch_id].ch_type ==
1152 MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001153 mhi_log(MHI_MSG_INFO, "Pending outbound transaction\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001154 return 0;
1155 } else if (mhi->ch_ctx_cache[ch_id].ch_type ==
1156 MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05301157 (mhi->ch[ch_id].pend_wr_count > 0)) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001158 mhi_log(MHI_MSG_INFO, "Pending inbound transaction\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001159 return 0;
1160 }
1161
1162 /* set the channel to stop */
1163 mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001164 mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001165
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001166 if (mhi->use_ipa) {
1167 data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001168 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001169 } else {
1170 data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
1171 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1172 data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
1173 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1174 }
1175 data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
1176 data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
1177
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001178 /* update the channel state in the host */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001179 mhi_dev_write_to_host(mhi, &data_transfer, NULL, MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001180
1181 /* send the completion event to the host */
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001182 rc = mhi_dev_send_cmd_comp_event(mhi,
1183 MHI_CMD_COMPL_CODE_SUCCESS);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001184 if (rc)
1185 pr_err("Error sending command completion event\n");
1186
1187 return rc;
1188}
1189
1190static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi,
1191 union mhi_dev_ring_element_type *el, void *ctx)
1192{
1193 int rc = 0;
1194 uint32_t ch_id = 0;
1195 union mhi_dev_ring_element_type event;
1196 struct mhi_addr host_addr;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001197 struct mhi_dev_channel *ch;
1198 struct mhi_dev_ring *ring;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05301199 union mhi_dev_ring_ctx *evt_ctx;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001200
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001201 ch_id = el->generic.chid;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001202 mhi_log(MHI_MSG_VERBOSE, "for channel:%d and cmd:%d\n",
1203 ch_id, el->generic.type);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001204
1205 switch (el->generic.type) {
1206 case MHI_DEV_RING_EL_START:
		mhi_log(MHI_MSG_VERBOSE, "received start cmd for channel %d\n",
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001208 ch_id);
1209 if (ch_id >= (HW_CHANNEL_BASE)) {
1210 rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
1211 if (rc) {
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001212 mhi_log(MHI_MSG_ERROR,
1213 "Error with HW channel cmd %d\n", rc);
1214 rc = mhi_dev_send_cmd_comp_event(mhi,
1215 MHI_CMD_COMPL_CODE_UNDEFINED);
1216 if (rc)
1217 mhi_log(MHI_MSG_ERROR,
1218 "Error with compl event\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001219 return;
1220 }
1221 goto send_start_completion_event;
1222 }
1223
1224 /* fetch the channel context from host */
1225 mhi_dev_fetch_ch_ctx(mhi, ch_id);
1226
1227 /* Initialize and configure the corresponding channel ring */
1228 rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id],
1229 (union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id],
1230 mhi);
1231 if (rc) {
1232 mhi_log(MHI_MSG_ERROR,
1233 "start ring failed for ch %d\n", ch_id);
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001234 rc = mhi_dev_send_cmd_comp_event(mhi,
1235 MHI_CMD_COMPL_CODE_UNDEFINED);
1236 if (rc)
1237 mhi_log(MHI_MSG_ERROR,
1238 "Error with compl event\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001239 return;
1240 }
1241
1242 mhi->ring[mhi->ch_ring_start + ch_id].state =
1243 RING_STATE_PENDING;
1244
1245 /* set the channel to running */
1246 mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001247 mhi->ch[ch_id].state = MHI_DEV_CH_STARTED;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001248 mhi->ch[ch_id].ch_id = ch_id;
1249 mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id];
1250 mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type;
1251
1252 /* enable DB for event ring */
1253 rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id);
1254 if (rc) {
1255 pr_err("Failed to enable channel db\n");
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001256 rc = mhi_dev_send_cmd_comp_event(mhi,
1257 MHI_CMD_COMPL_CODE_UNDEFINED);
1258 if (rc)
1259 mhi_log(MHI_MSG_ERROR,
1260 "Error with compl event\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001261 return;
1262 }
1263
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05301264 if (mhi->use_ipa) {
1265 uint32_t evnt_ring_idx = mhi->ev_ring_start +
1266 mhi->ch_ctx_cache[ch_id].err_indx;
1267 struct mhi_dev_ring *evt_ring =
1268 &mhi->ring[evnt_ring_idx];
1269 evt_ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache
1270 [mhi->ch_ctx_cache[ch_id].err_indx];
1271 if (mhi_ring_get_state(evt_ring) == RING_STATE_UINT) {
1272 rc = mhi_ring_start(evt_ring, evt_ctx, mhi);
1273 if (rc) {
1274 mhi_log(MHI_MSG_ERROR,
1275 "error starting event ring %d\n",
1276 mhi->ch_ctx_cache[ch_id].err_indx);
1277 return;
1278 }
1279 }
1280 mhi_dev_alloc_evt_buf_evt_req(mhi, &mhi->ch[ch_id],
1281 evt_ring);
1282 }
1283
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001284 if (mhi->use_ipa)
1285 host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001286 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001287 else
1288 host_addr.device_va = mhi->ch_ctx_shadow.device_va +
1289 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1290
1291 host_addr.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
1292 host_addr.size = sizeof(enum mhi_dev_ch_ctx_state);
1293
1294 mhi_dev_write_to_host(mhi, &host_addr, NULL, MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001295
1296send_start_completion_event:
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001297 rc = mhi_dev_send_cmd_comp_event(mhi,
1298 MHI_CMD_COMPL_CODE_SUCCESS);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001299 if (rc)
1300 pr_err("Error sending command completion event\n");
1301
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05301302 mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001303 /* Trigger callback to clients */
Siva Kumar Akkireddi6a719ae2018-06-11 22:44:14 +05301304 mhi_dev_trigger_cb(ch_id);
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301305 mhi_uci_chan_state_notify(mhi, ch_id, MHI_STATE_CONNECTED);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001306 break;
1307 case MHI_DEV_RING_EL_STOP:
1308 if (ch_id >= HW_CHANNEL_BASE) {
1309 rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001310 if (rc)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001311 mhi_log(MHI_MSG_ERROR,
1312 "send channel stop cmd event failed\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001313
1314 /* send the completion event to the host */
1315 event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
1316 (mhi->ring[MHI_RING_CMD_ID].rd_offset *
1317 (sizeof(union mhi_dev_ring_element_type)));
1318 event.evt_cmd_comp.type =
1319 MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
1320 if (rc == 0)
1321 event.evt_cmd_comp.code =
1322 MHI_CMD_COMPL_CODE_SUCCESS;
1323 else
1324 event.evt_cmd_comp.code =
1325 MHI_CMD_COMPL_CODE_UNDEFINED;
1326
1327 rc = mhi_dev_send_event(mhi, 0, &event);
1328 if (rc) {
1329 pr_err("stop event send failed\n");
1330 return;
1331 }
1332 } else {
			/*
			 * Check if there are any pending transactions for the
			 * ring associated with the channel. If there are none,
			 * write the stopped channel state to the host and
			 * complete the command; otherwise defer the stop until
			 * the pending transfers are done.
			 */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001340 ring = &mhi->ring[ch_id + mhi->ch_ring_start];
1341 if (ring->state == RING_STATE_UINT) {
1342 pr_err("Channel not opened for %d\n", ch_id);
1343 return;
1344 }
1345
1346 ch = &mhi->ch[ch_id];
1347
1348 mutex_lock(&ch->ch_lock);
1349
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001350 mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP;
1351 rc = mhi_dev_process_stop_cmd(
1352 &mhi->ring[mhi->ch_ring_start + ch_id],
1353 ch_id, mhi);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001354 if (rc)
1355 pr_err("stop event send failed\n");
1356
1357 mutex_unlock(&ch->ch_lock);
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07001358 mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301359 /* Trigger callback to clients */
1360 mhi_dev_trigger_cb(ch_id);
1361 mhi_uci_chan_state_notify(mhi, ch_id,
1362 MHI_STATE_DISCONNECTED);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001363 }
1364 break;
1365 case MHI_DEV_RING_EL_RESET:
1366 mhi_log(MHI_MSG_VERBOSE,
1367 "received reset cmd for channel %d\n", ch_id);
1368 if (ch_id >= HW_CHANNEL_BASE) {
1369 rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001370 if (rc)
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001371 mhi_log(MHI_MSG_ERROR,
1372 "send channel stop cmd event failed\n");
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001373
1374 /* send the completion event to the host */
1375 event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
1376 (mhi->ring[MHI_RING_CMD_ID].rd_offset *
1377 (sizeof(union mhi_dev_ring_element_type)));
1378 event.evt_cmd_comp.type =
1379 MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
1380 if (rc == 0)
1381 event.evt_cmd_comp.code =
1382 MHI_CMD_COMPL_CODE_SUCCESS;
1383 else
1384 event.evt_cmd_comp.code =
1385 MHI_CMD_COMPL_CODE_UNDEFINED;
1386
1387 rc = mhi_dev_send_event(mhi, 0, &event);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001388 if (rc) {
1389 pr_err("stop event send failed\n");
1390 return;
1391 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001392 } else {
1393
1394 mhi_log(MHI_MSG_VERBOSE,
1395 "received reset cmd for channel %d\n",
1396 ch_id);
1397
1398 ring = &mhi->ring[ch_id + mhi->ch_ring_start];
1399 if (ring->state == RING_STATE_UINT) {
1400 pr_err("Channel not opened for %d\n", ch_id);
1401 return;
1402 }
1403
1404 ch = &mhi->ch[ch_id];
1405
1406 mutex_lock(&ch->ch_lock);
1407
1408 /* hard stop and set the channel to stop */
1409 mhi->ch_ctx_cache[ch_id].ch_state =
1410 MHI_DEV_CH_STATE_DISABLED;
1411 mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
1412 if (mhi->use_ipa)
1413 host_addr.host_pa =
1414 mhi->ch_ctx_shadow.host_pa +
1415 (sizeof(struct mhi_dev_ch_ctx) * ch_id);
1416 else
1417 host_addr.device_va =
1418 mhi->ch_ctx_shadow.device_va +
1419 (sizeof(struct mhi_dev_ch_ctx) * ch_id);
1420
1421 host_addr.virt_addr =
1422 &mhi->ch_ctx_cache[ch_id].ch_state;
1423 host_addr.size = sizeof(enum mhi_dev_ch_ctx_state);
1424
1425 /* update the channel state in the host */
1426 mhi_dev_write_to_host(mhi, &host_addr, NULL,
1427 MHI_DEV_DMA_SYNC);
1428
1429 /* send the completion event to the host */
Siddartha Mohanadossb9fc5472018-03-19 14:23:10 -07001430 rc = mhi_dev_send_cmd_comp_event(mhi,
1431 MHI_CMD_COMPL_CODE_SUCCESS);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001432 if (rc)
1433 pr_err("Error sending command completion event\n");
1434 mutex_unlock(&ch->ch_lock);
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07001435 mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301436 mhi_dev_trigger_cb(ch_id);
1437 mhi_uci_chan_state_notify(mhi, ch_id,
1438 MHI_STATE_DISCONNECTED);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001439 }
1440 break;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001441 default:
1442 pr_err("%s: Invalid command:%d\n", __func__, el->generic.type);
1443 break;
1444 }
1445}
1446
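/*
 * Ring callback for channel (transfer) rings: validate that the ring id
 * really belongs to a channel ring, then notify the active client (if any)
 * through its event_trigger callback that a TRE is available. The data
 * itself is only copied later, when the client reads from or writes to
 * the channel.
 */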
1447static void mhi_dev_process_tre_ring(struct mhi_dev *mhi,
1448 union mhi_dev_ring_element_type *el, void *ctx)
1449{
1450 struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx;
1451 struct mhi_dev_channel *ch;
1452 struct mhi_dev_client_cb_reason reason;
1453
1454 if (ring->id < mhi->ch_ring_start) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001455 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001456 "invalid channel ring id (%d), should be < %d\n",
1457 ring->id, mhi->ch_ring_start);
1458 return;
1459 }
1460
1461 ch = &mhi->ch[ring->id - mhi->ch_ring_start];
1462 reason.ch_id = ch->ch_id;
1463 reason.reason = MHI_DEV_TRE_AVAILABLE;
1464
1465 /* Invoke a callback to let the client know its data is ready.
1466 * Copy this event to the clients context so that it can be
1467 * sent out once the client has fetch the data. Update the rp
1468 * before sending the data as part of the event completion
1469 */
1470 if (ch->active_client && ch->active_client->event_trigger != NULL)
1471 ch->active_client->event_trigger(&reason);
1472}
1473
1474static void mhi_dev_process_ring_pending(struct work_struct *work)
1475{
1476 struct mhi_dev *mhi = container_of(work,
1477 struct mhi_dev, pending_work);
1478 struct list_head *cp, *q;
1479 struct mhi_dev_ring *ring;
1480 struct mhi_dev_channel *ch;
1481 int rc = 0;
1482
1483 mutex_lock(&mhi_ctx->mhi_lock);
1484 rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]);
1485 if (rc) {
1486 mhi_log(MHI_MSG_ERROR, "error processing command ring\n");
1487 goto exit;
1488 }
1489
1490 list_for_each_safe(cp, q, &mhi->process_ring_list) {
1491 ring = list_entry(cp, struct mhi_dev_ring, list);
1492 list_del(cp);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001493 mhi_log(MHI_MSG_VERBOSE, "processing ring %d\n", ring->id);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001494 rc = mhi_dev_process_ring(ring);
1495 if (rc) {
1496 mhi_log(MHI_MSG_ERROR,
1497 "error processing ring %d\n", ring->id);
1498 goto exit;
1499 }
1500
1501 if (ring->id < mhi->ch_ring_start) {
1502 mhi_log(MHI_MSG_ERROR,
1503 "ring (%d) is not a channel ring\n", ring->id);
1504 goto exit;
1505 }
1506
1507 ch = &mhi->ch[ring->id - mhi->ch_ring_start];
1508 rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id);
1509 if (rc) {
1510 mhi_log(MHI_MSG_ERROR,
1511 "error enabling chdb interrupt for %d\n", ch->ch_id);
1512 goto exit;
1513 }
1514 }
1515
1516exit:
1517 mutex_unlock(&mhi_ctx->mhi_lock);
1518}
1519
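/* Map an MHI M-state (M0-M3) to the corresponding state machine event */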
1520static int mhi_dev_get_event_notify(enum mhi_dev_state state,
1521 enum mhi_dev_event *event)
1522{
1523 int rc = 0;
1524
1525 switch (state) {
1526 case MHI_DEV_M0_STATE:
1527 *event = MHI_DEV_EVENT_M0_STATE;
1528 break;
1529 case MHI_DEV_M1_STATE:
1530 *event = MHI_DEV_EVENT_M1_STATE;
1531 break;
1532 case MHI_DEV_M2_STATE:
1533 *event = MHI_DEV_EVENT_M2_STATE;
1534 break;
1535 case MHI_DEV_M3_STATE:
1536 *event = MHI_DEV_EVENT_M3_STATE;
1537 break;
1538 default:
1539 rc = -EINVAL;
1540 break;
1541 }
1542
1543 return rc;
1544}
1545
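/*
 * Walk the channel doorbell bitmask: for every channel whose doorbell was
 * rung, mark its ring as pending, add it to the process list, mask further
 * doorbell interrupts for that channel and queue the pending ring worker.
 */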
1546static void mhi_dev_queue_channel_db(struct mhi_dev *mhi,
1547 uint32_t chintr_value, uint32_t ch_num)
1548{
1549 struct mhi_dev_ring *ring;
1550 int rc = 0;
1551
1552 for (; chintr_value; ch_num++, chintr_value >>= 1) {
1553 if (chintr_value & 1) {
1554 ring = &mhi->ring[ch_num + mhi->ch_ring_start];
1555 if (ring->state == RING_STATE_UINT) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001556 pr_debug("Channel not opened for %d\n", ch_num);
Siva Kumar Akkireddi3d6d73d2018-10-30 15:23:41 +05301557 continue;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001558 }
1559 mhi_ring_set_state(ring, RING_STATE_PENDING);
1560 list_add(&ring->list, &mhi->process_ring_list);
1561 rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num);
1562 if (rc) {
1563 pr_err("Error disabling chdb\n");
1564 return;
1565 }
1566 queue_work(mhi->pending_ring_wq, &mhi->pending_work);
1567 }
1568 }
1569}
1570
1571static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi)
1572{
1573 int i, rc = 0;
1574 uint32_t chintr_value = 0, ch_num = 0;
1575
1576 rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi);
1577 if (rc) {
1578 		pr_err("Failed to read channel db status\n");
1579 return;
1580 }
1581
1582 for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
1583 ch_num = i * MHI_MASK_CH_EV_LEN;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001584 /* Process channel status whose mask is enabled */
1585 chintr_value = (mhi->chdb[i].status & mhi->chdb[i].mask);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001586 if (chintr_value) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001587 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001588 "processing id: %d, ch interrupt 0x%x\n",
1589 i, chintr_value);
1590 mhi_dev_queue_channel_db(mhi, chintr_value, ch_num);
1591 rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i),
1592 mhi->chdb[i].status);
1593 if (rc) {
1594 pr_err("Error writing interrupt clear for A7\n");
1595 return;
1596 }
1597 }
1598 }
1599}
1600
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301601static void mhi_update_state_info_all(enum mhi_ctrl_info info)
1602{
1603 int i;
1604 struct mhi_dev_client_cb_reason reason;
1605
1606 mhi_ctx->ctrl_info = info;
Siddartha Mohanadoss32b02492020-03-19 22:12:00 -07001607 for (i = 0; i < MHI_MAX_SOFTWARE_CHANNELS; ++i) {
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301608 channel_state_info[i].ctrl_info = info;
Siva Kumar Akkireddi8ca68e62019-01-16 21:38:13 +05301609 /* Notify kernel clients */
1610 mhi_dev_trigger_cb(i);
1611 }
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301612
1613 /* For legacy reasons for QTI client */
1614 reason.reason = MHI_DEV_CTRL_UPDATE;
1615 uci_ctrl_update(&reason);
1616}
1617
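/*
 * Handle an MHI reset from the host: hard-stop all opened channels, notify
 * clients of the disconnect, tear down IPA MHI and the local context caches,
 * mask doorbell/control/command interrupts, re-register for a PCIe link-up
 * callback so MHI can be re-initialized later, and clear the MMIO RESET field.
 */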
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001618static int mhi_dev_abort(struct mhi_dev *mhi)
1619{
1620 struct mhi_dev_channel *ch;
1621 struct mhi_dev_ring *ring;
1622 int ch_id = 0, rc = 0;
1623
1624 /* Hard stop all the channels */
1625 for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
1626 ring = &mhi->ring[ch_id + mhi->ch_ring_start];
1627 if (ring->state == RING_STATE_UINT)
1628 continue;
1629
1630 ch = &mhi->ch[ch_id];
1631 mutex_lock(&ch->ch_lock);
1632 mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
1633 mutex_unlock(&ch->ch_lock);
1634 }
1635
Siva Kumar Akkireddi9ca7e342018-12-14 04:53:01 +05301636 /* Update channel state and notify clients */
1637 mhi_update_state_info_all(MHI_STATE_DISCONNECTED);
1638 mhi_uci_chan_state_notify_all(mhi, MHI_STATE_DISCONNECTED);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001639
1640 flush_workqueue(mhi->ring_init_wq);
1641 flush_workqueue(mhi->pending_ring_wq);
1642
1643 /* Initiate MHI IPA reset */
1644 ipa_mhi_destroy();
1645
1646 /* Clean up initialized channels */
1647 rc = mhi_deinit(mhi);
1648 if (rc) {
1649 pr_err("Error during mhi_deinit with %d\n", rc);
1650 return rc;
1651 }
1652
1653 rc = mhi_dev_mmio_mask_chdb_interrupts(mhi_ctx);
1654 if (rc) {
1655 		pr_err("Failed to mask channel db interrupts\n");
1656 return rc;
1657 }
1658
1659 rc = mhi_dev_mmio_disable_ctrl_interrupt(mhi_ctx);
1660 if (rc) {
1661 		pr_err("Failed to disable control interrupt\n");
1662 return rc;
1663 }
1664
1665 rc = mhi_dev_mmio_disable_cmdb_interrupt(mhi_ctx);
1666 if (rc) {
1667 		pr_err("Failed to disable command db interrupt\n");
1668 return rc;
1669 }
1670
1671
1672 atomic_set(&mhi_ctx->re_init_done, 0);
1673
1674 mhi_log(MHI_MSG_INFO,
1675 "Register a PCIe callback during re-init\n");
1676 mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP;
1677 mhi_ctx->event_reg.user = mhi_ctx;
1678 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
1679 mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up;
1680 mhi_ctx->event_reg.options = MHI_REINIT;
1681
1682 rc = ep_pcie_register_event(mhi_ctx->phandle,
1683 &mhi_ctx->event_reg);
1684 if (rc) {
1685 pr_err("Failed to register for events from PCIe\n");
1686 return rc;
1687 }
1688
1689 /* Set RESET field to 0 */
1690 mhi_dev_mmio_reset(mhi_ctx);
1691
1692 return rc;
1693}
1694
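/*
 * DMA completion callback for async channel transfers: decrement the pending
 * write count for inbound channels, unmap the buffer, skip the client
 * callback if the channel was already closed, otherwise invoke it, flush
 * pending read completion events to the host for outbound channels, and
 * finish a deferred stop if the channel is pending stop.
 */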
1695static void mhi_dev_transfer_completion_cb(void *mreq)
1696{
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001697 int rc = 0;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05301698 struct mhi_req *req = mreq;
1699 struct mhi_dev_channel *ch = req->client->channel;
1700 u32 snd_cmpl = req->snd_cmpl;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001701
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05301702 if (mhi_ctx->ch_ctx_cache[ch->ch_id].ch_type ==
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05301703 MHI_DEV_CH_TYPE_INBOUND_CHANNEL)
1704 ch->pend_wr_count--;
1705
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001706 dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
1707 req->len, DMA_FROM_DEVICE);
1708
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05301709 /*
1710 * Channel got closed with transfers pending
1711 * Do not trigger callback or send cmpl to host
1712 */
1713 if (ch->state == MHI_DEV_CH_CLOSED) {
1714 mhi_log(MHI_MSG_DBG, "Ch %d closed with %d writes pending\n",
1715 ch->ch_id, ch->pend_wr_count + 1);
1716 return;
1717 }
1718
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001719 /* Trigger client call back */
1720 req->client_cb(req);
1721
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05301722 /* Flush read completions to host */
1723 if (snd_cmpl && mhi_ctx->ch_ctx_cache[ch->ch_id].ch_type ==
1724 MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
1725 mhi_log(MHI_MSG_DBG, "Calling flush for ch %d\n", ch->ch_id);
1726 rc = mhi_dev_flush_transfer_completion_events(mhi_ctx, ch);
1727 if (rc) {
1728 mhi_log(MHI_MSG_ERROR,
1729 "Failed to flush read completions to host\n");
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001730 }
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05301731 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001732
1733 if (ch->state == MHI_DEV_CH_PENDING_STOP) {
1734 ch->state = MHI_DEV_CH_STOPPED;
1735 rc = mhi_dev_process_stop_cmd(ch->ring, ch->ch_id, mhi_ctx);
1736 if (rc)
1737 mhi_log(MHI_MSG_ERROR,
1738 "Error while stopping channel (%d)\n", ch->ch_id);
1739 }
1740}
1741
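/*
 * Bottom half for the MHI interrupt: acknowledge control interrupts, handle
 * a device reset or MHI state change by notifying the state machine, queue
 * command ring work and per-channel doorbell work, then re-enable the MHI
 * interrupt source.
 */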
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001742static void mhi_dev_scheduler(struct work_struct *work)
1743{
1744 struct mhi_dev *mhi = container_of(work,
1745 struct mhi_dev, chdb_ctrl_work);
1746 int rc = 0;
1747 uint32_t int_value = 0;
1748 struct mhi_dev_ring *ring;
1749 enum mhi_dev_state state;
1750 enum mhi_dev_event event = 0;
Siddartha Mohanadosse81dee92018-12-06 18:22:28 -08001751 u32 mhi_reset;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001752
1753 mutex_lock(&mhi_ctx->mhi_lock);
1754 /* Check for interrupts */
1755 mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value);
1756
1757 if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001758 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001759 "processing ctrl interrupt with %d\n", int_value);
Siva Kumar Akkireddi4e388212019-02-05 19:53:19 +05301760
1761 rc = mhi_dev_mmio_read(mhi, BHI_IMGTXDB, &bhi_imgtxdb);
1762 mhi_log(MHI_MSG_DBG, "BHI_IMGTXDB = 0x%x\n", bhi_imgtxdb);
1763
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001764 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001765 if (rc) {
1766 pr_err("%s: get mhi state failed\n", __func__);
1767 mutex_unlock(&mhi_ctx->mhi_lock);
1768 return;
1769 }
1770
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001771 if (mhi_reset) {
1772 mhi_log(MHI_MSG_VERBOSE,
1773 "processing mhi device reset\n");
1774 rc = mhi_dev_abort(mhi);
1775 if (rc)
1776 pr_err("device reset failed:%d\n", rc);
1777 mutex_unlock(&mhi_ctx->mhi_lock);
1778 queue_work(mhi->ring_init_wq, &mhi->re_init);
1779 return;
1780 }
1781
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001782 rc = mhi_dev_get_event_notify(state, &event);
1783 if (rc) {
1784 pr_err("unsupported state :%d\n", state);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001785 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001786 }
1787
1788 rc = mhi_dev_notify_sm_event(event);
1789 if (rc) {
1790 pr_err("error sending SM event\n");
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001791 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001792 }
1793 }
1794
1795 if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001796 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001797 "processing cmd db interrupt with %d\n", int_value);
1798 ring = &mhi->ring[MHI_RING_CMD_ID];
1799 ring->state = RING_STATE_PENDING;
1800 queue_work(mhi->pending_ring_wq, &mhi->pending_work);
1801 }
1802
1803 /* get the specific channel interrupts */
1804 mhi_dev_check_channel_interrupt(mhi);
1805
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001806fail:
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001807 mutex_unlock(&mhi_ctx->mhi_lock);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001808
1809 if (mhi->config_iatu || mhi->mhi_int)
1810 enable_irq(mhi->mhi_irq);
1811 else
1812 ep_pcie_mask_irq_event(mhi->phandle,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001813 EP_PCIE_INT_EVT_MHI_A7, true);
1814}
1815
1816void mhi_dev_notify_a7_event(struct mhi_dev *mhi)
1817{
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001818
1819 if (!atomic_read(&mhi->mhi_dev_wake)) {
1820 pm_stay_awake(mhi->dev);
1821 atomic_set(&mhi->mhi_dev_wake, 1);
1822 }
1823 mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock\n");
1824
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001825 schedule_work(&mhi->chdb_ctrl_work);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001826 mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001827}
1828EXPORT_SYMBOL(mhi_dev_notify_a7_event);
1829
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001830static irqreturn_t mhi_dev_isr(int irq, void *dev_id)
1831{
1832 struct mhi_dev *mhi = dev_id;
1833
Rama Krishna Phani A62cedde2018-06-11 19:57:00 +05301834 if (!atomic_read(&mhi->mhi_dev_wake)) {
1835 pm_stay_awake(mhi->dev);
1836 atomic_set(&mhi->mhi_dev_wake, 1);
1837 mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock in ISR\n");
1838 }
1839
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001840 disable_irq_nosync(mhi->mhi_irq);
1841 schedule_work(&mhi->chdb_ctrl_work);
1842 mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
1843
1844 return IRQ_HANDLED;
1845}
1846
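/*
 * Program two outbound iATU windows so that the device-local data and
 * control address ranges translate to the host's data and control regions.
 */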
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001847int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi)
1848{
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001849 struct ep_pcie_iatu control, data;
1850 int rc = 0;
1851 struct ep_pcie_iatu entries[MHI_HOST_REGION_NUM];
1852
1853 data.start = mhi->data_base.device_pa;
1854 data.end = mhi->data_base.device_pa + mhi->data_base.size - 1;
1855 data.tgt_lower = HOST_ADDR_LSB(mhi->data_base.host_pa);
1856 data.tgt_upper = HOST_ADDR_MSB(mhi->data_base.host_pa);
1857
1858 control.start = mhi->ctrl_base.device_pa;
1859 control.end = mhi->ctrl_base.device_pa + mhi->ctrl_base.size - 1;
1860 control.tgt_lower = HOST_ADDR_LSB(mhi->ctrl_base.host_pa);
1861 control.tgt_upper = HOST_ADDR_MSB(mhi->ctrl_base.host_pa);
1862
1863 entries[0] = data;
1864 entries[1] = control;
1865
1866 rc = ep_pcie_config_outbound_iatu(mhi_ctx->phandle, entries,
1867 MHI_HOST_REGION_NUM);
1868 if (rc) {
1869 		pr_err("error configuring outbound iATU\n");
1870 return rc;
1871 }
1872
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001873 return 0;
1874}
1875EXPORT_SYMBOL(mhi_dev_config_outbound_iatu);
1876
1877static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
1878{
1879 int rc = 0;
1880 struct platform_device *pdev;
1881 uint64_t addr1 = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001882 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001883
1884 pdev = mhi->pdev;
1885
1886 /* Get host memory region configuration */
1887 mhi_dev_get_mhi_addr(mhi);
1888
1889 mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb,
1890 mhi->host_addr.ctrl_base_msb);
1891 mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb,
1892 mhi->host_addr.data_base_msb);
1893
1894 addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb,
1895 mhi->host_addr.ctrl_limit_msb);
1896 mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa;
1897 addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb,
1898 mhi->host_addr.data_limit_msb);
1899 mhi->data_base.size = addr1 - mhi->data_base.host_pa;
1900
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001901 if (mhi->config_iatu) {
1902 if (mhi->ctrl_base.host_pa > mhi->data_base.host_pa) {
1903 mhi->data_base.device_pa = mhi->device_local_pa_base;
1904 mhi->ctrl_base.device_pa = mhi->device_local_pa_base +
1905 mhi->ctrl_base.host_pa - mhi->data_base.host_pa;
1906 } else {
1907 mhi->ctrl_base.device_pa = mhi->device_local_pa_base;
1908 mhi->data_base.device_pa = mhi->device_local_pa_base +
1909 mhi->data_base.host_pa - mhi->ctrl_base.host_pa;
1910 }
1911
1912 if (!mhi->use_ipa) {
1913 mhi->ctrl_base.device_va =
1914 (uintptr_t) devm_ioremap_nocache(&pdev->dev,
1915 mhi->ctrl_base.device_pa,
1916 mhi->ctrl_base.size);
1917 if (!mhi->ctrl_base.device_va) {
1918 pr_err("io remap failed for mhi address\n");
1919 return -EINVAL;
1920 }
1921 }
1922 }
1923
1924 if (mhi->config_iatu) {
1925 rc = mhi_dev_config_outbound_iatu(mhi);
1926 if (rc) {
1927 pr_err("Configuring iATU failed\n");
1928 return rc;
1929 }
1930 }
1931
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001932 /* Get Channel, event and command context base pointer */
1933 rc = mhi_dev_mmio_get_chc_base(mhi);
1934 if (rc) {
1935 pr_err("Fetching channel context failed\n");
1936 return rc;
1937 }
1938
1939 rc = mhi_dev_mmio_get_erc_base(mhi);
1940 if (rc) {
1941 pr_err("Fetching event ring context failed\n");
1942 return rc;
1943 }
1944
1945 rc = mhi_dev_mmio_get_crc_base(mhi);
1946 if (rc) {
1947 pr_err("Fetching command ring context failed\n");
1948 return rc;
1949 }
1950
1951 rc = mhi_dev_update_ner(mhi);
1952 if (rc) {
1953 pr_err("Fetching NER failed\n");
1954 return rc;
1955 }
1956
1957 mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx);
1958 mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) *
1959 mhi->cfg.event_rings;
1960 mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
1961 mhi->cfg.channels;
1962
1963 mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
1964 sizeof(struct mhi_dev_cmd_ctx),
1965 &mhi->cmd_ctx_cache_dma_handle,
1966 GFP_KERNEL);
1967 if (!mhi->cmd_ctx_cache) {
1968 pr_err("no memory while allocating cmd ctx\n");
1969 return -ENOMEM;
1970 }
1971 memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx));
1972
1973 mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
1974 sizeof(struct mhi_dev_ev_ctx) *
1975 mhi->cfg.event_rings,
1976 &mhi->ev_ctx_cache_dma_handle,
1977 GFP_KERNEL);
1978 if (!mhi->ev_ctx_cache)
1979 return -ENOMEM;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001980 memset(mhi->ev_ctx_cache, 0, sizeof(struct mhi_dev_ev_ctx) *
1981 mhi->cfg.event_rings);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001982
1983 mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
1984 sizeof(struct mhi_dev_ch_ctx) *
1985 mhi->cfg.channels,
1986 &mhi->ch_ctx_cache_dma_handle,
1987 GFP_KERNEL);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001988 if (!mhi->ch_ctx_cache)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001989 return -ENOMEM;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001990 memset(mhi->ch_ctx_cache, 0, sizeof(struct mhi_dev_ch_ctx) *
1991 mhi->cfg.channels);
1992
1993 if (mhi->use_ipa) {
1994 data_transfer.phy_addr = mhi->cmd_ctx_cache_dma_handle;
1995 data_transfer.host_pa = mhi->cmd_ctx_shadow.host_pa;
1996 }
1997
1998 data_transfer.size = mhi->cmd_ctx_shadow.size;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001999
2000 /* Cache the command and event context */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002001 mhi_dev_read_from_host(mhi, &data_transfer);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002002
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002003 if (mhi->use_ipa) {
2004 data_transfer.phy_addr = mhi->ev_ctx_cache_dma_handle;
2005 data_transfer.host_pa = mhi->ev_ctx_shadow.host_pa;
2006 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002007
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002008 data_transfer.size = mhi->ev_ctx_shadow.size;
2009
2010 mhi_dev_read_from_host(mhi, &data_transfer);
2011
2012 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002013 "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
2014 mhi->cmd_ctx_cache->rbase,
2015 mhi->cmd_ctx_cache->rp,
2016 mhi->cmd_ctx_cache->wp);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002017 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002018 "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
2019 mhi_ctx->ev_ctx_cache->rbase,
2020 mhi->ev_ctx_cache->rp,
2021 mhi->ev_ctx_cache->wp);
2022
2023 rc = mhi_ring_start(&mhi->ring[0],
2024 (union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
2025 if (rc) {
2026 pr_err("error in ring start\n");
2027 return rc;
2028 }
2029
2030 return 0;
2031}
2032
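/*
 * Suspend: mark every running channel as suspended, mirror the new channel
 * state into the host's channel context, and release the MHI wakelock.
 */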
2033int mhi_dev_suspend(struct mhi_dev *mhi)
2034{
2035 int ch_id = 0, rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002036 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002037
2038 mutex_lock(&mhi_ctx->mhi_write_test);
2039 atomic_set(&mhi->is_suspended, 1);
2040
2041 for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
2042 if (mhi->ch_ctx_cache[ch_id].ch_state !=
2043 MHI_DEV_CH_STATE_RUNNING)
2044 continue;
2045
2046 mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED;
2047
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002048 if (mhi->use_ipa) {
2049 data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002050 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002051 } else {
2052 data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
2053 sizeof(struct mhi_dev_ch_ctx) * ch_id;
2054 data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
2055 sizeof(struct mhi_dev_ch_ctx) * ch_id;
2056 }
2057
2058 data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
2059 data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002060
2061 /* update the channel state in the host */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002062 mhi_dev_write_to_host(mhi, &data_transfer, NULL,
2063 MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002064
2065 }
2066
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002067 atomic_set(&mhi->mhi_dev_wake, 0);
2068 pm_relax(mhi->dev);
2069 mhi_log(MHI_MSG_VERBOSE, "releasing mhi wakelock\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002070
2071 mutex_unlock(&mhi_ctx->mhi_write_test);
2072
2073 return rc;
2074}
2075EXPORT_SYMBOL(mhi_dev_suspend);
2076
2077int mhi_dev_resume(struct mhi_dev *mhi)
2078{
2079 int ch_id = 0, rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002080 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002081
2082 for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
2083 if (mhi->ch_ctx_cache[ch_id].ch_state !=
2084 MHI_DEV_CH_STATE_SUSPENDED)
2085 continue;
2086
2087 mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002088 if (mhi->use_ipa) {
2089 data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002090 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002091 } else {
2092 data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
2093 sizeof(struct mhi_dev_ch_ctx) * ch_id;
2094 data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
2095 sizeof(struct mhi_dev_ch_ctx) * ch_id;
2096 }
2097
2098 data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
2099 data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002100
2101 /* update the channel state in the host */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002102 mhi_dev_write_to_host(mhi, &data_transfer, NULL,
2103 MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002104 }
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002105 mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002106
2107 atomic_set(&mhi->is_suspended, 0);
2108
2109 return rc;
2110}
2111EXPORT_SYMBOL(mhi_dev_resume);
2112
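/*
 * Ring index layout: ring 0 is the command ring, rings 1..NER are event
 * rings, and the channel rings follow immediately after the event rings.
 */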
2113static int mhi_dev_ring_init(struct mhi_dev *dev)
2114{
2115 int i = 0;
2116
2117 	mhi_log(MHI_MSG_INFO, "initializing all rings\n");
2118 dev->cmd_ring_idx = 0;
2119 dev->ev_ring_start = 1;
2120 dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings;
2121
2122 /* Initialize CMD ring */
2123 mhi_ring_init(&dev->ring[dev->cmd_ring_idx],
2124 RING_TYPE_CMD, dev->cmd_ring_idx);
2125
2126 mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx],
2127 mhi_dev_process_cmd_ring);
2128
2129 /* Initialize Event ring */
2130 for (i = dev->ev_ring_start; i < (dev->cfg.event_rings
2131 + dev->ev_ring_start); i++)
2132 mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i);
2133
2134 /* Initialize CH */
2135 for (i = dev->ch_ring_start; i < (dev->cfg.channels
2136 + dev->ch_ring_start); i++) {
2137 mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i);
2138 mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring);
2139 }
2140
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302141 return 0;
2142}
2143
2144static uint32_t mhi_dev_get_evt_ring_size(struct mhi_dev *mhi, uint32_t ch_id)
2145{
2146 uint32_t info;
2147 int rc;
2148
2149 /* If channel was started by host, get event ring size */
2150 rc = mhi_ctrl_state_info(ch_id, &info);
2151 if (rc || (info != MHI_STATE_CONNECTED))
2152 return NUM_TR_EVENTS_DEFAULT;
2153
2154 return mhi->ring[mhi->ev_ring_start +
2155 mhi->ch_ctx_cache[ch_id].err_indx].ring_size;
2156}
2157
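/*
 * Allocate (or re-allocate) the completion event buffer and event flush
 * requests for a channel. Both are sized to the event ring, since in the
 * worst case every event ring element may be flushed individually; an
 * existing allocation of the same size is reused as-is.
 */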
2158static int mhi_dev_alloc_evt_buf_evt_req(struct mhi_dev *mhi,
2159 struct mhi_dev_channel *ch, struct mhi_dev_ring *evt_ring)
2160{
2161 int rc;
2162 uint32_t size, i;
2163
2164 if (evt_ring)
2165 size = evt_ring->ring_size;
2166 else
2167 size = mhi_dev_get_evt_ring_size(mhi, ch->ch_id);
2168
2169 if (!size) {
2170 mhi_log(MHI_MSG_ERROR,
2171 "Evt buf size is 0 for channel %d", ch->ch_id);
2172 return -EINVAL;
2173 }
2174
2175 /* Previous allocated evt buf size matches requested size */
2176 if (size == ch->evt_buf_size)
2177 return 0;
2178
2179 /*
2180 * Either evt buf and evt reqs were not allocated yet or
2181 * they were allocated with a different size
2182 */
2183 if (ch->evt_buf_size) {
2184 kfree(ch->ereqs);
2185 kfree(ch->tr_events);
2186 }
2187 /*
2188 * Set number of event flush req buffers equal to size of
2189 * event buf since in the worst case we may need to flush
2190 * every event ring element individually
2191 */
2192 ch->evt_buf_size = size;
2193 ch->evt_req_size = size;
2194
2195 mhi_log(MHI_MSG_INFO,
2196 "Channel %d evt buf size is %d\n", ch->ch_id, ch->evt_buf_size);
2197
2198 /* Allocate event requests */
2199 ch->ereqs = kcalloc(ch->evt_req_size, sizeof(*ch->ereqs), GFP_KERNEL);
2200 if (!ch->ereqs)
2201 return -ENOMEM;
2202
2203 /* Allocate buffers to queue transfer completion events */
2204 ch->tr_events = kcalloc(ch->evt_buf_size, sizeof(*ch->tr_events),
2205 GFP_KERNEL);
2206 if (!ch->tr_events) {
2207 rc = -ENOMEM;
2208 goto free_ereqs;
2209 }
2210
2211 /* Organize event flush requests into a linked list */
2212 INIT_LIST_HEAD(&ch->event_req_buffers);
2213 INIT_LIST_HEAD(&ch->flush_event_req_buffers);
2214 for (i = 0; i < ch->evt_req_size; ++i)
2215 list_add_tail(&ch->ereqs[i].list, &ch->event_req_buffers);
2216
2217 ch->curr_ereq =
2218 container_of(ch->event_req_buffers.next,
2219 struct event_req, list);
2220 list_del_init(&ch->curr_ereq->list);
2221 ch->curr_ereq->start = 0;
2222
2223 /*
2224 * Initialize cmpl event buffer indexes - evt_buf_rp and
2225 * evt_buf_wp point to the first and last free index available.
2226 */
2227 ch->evt_buf_rp = 0;
2228 ch->evt_buf_wp = ch->evt_buf_size - 1;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002229
2230 return 0;
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302231
2232free_ereqs:
2233 kfree(ch->ereqs);
2234 ch->ereqs = NULL;
2235 ch->evt_buf_size = 0;
2236 ch->evt_req_size = 0;
2237
2238 return rc;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002239}
2240
2241int mhi_dev_open_channel(uint32_t chan_id,
2242 struct mhi_dev_client **handle_client,
2243 void (*mhi_dev_client_cb_reason)
2244 (struct mhi_dev_client_cb_reason *cb))
2245{
2246 int rc = 0;
2247 struct mhi_dev_channel *ch;
2248 struct platform_device *pdev;
2249
2250 pdev = mhi_ctx->pdev;
2251 ch = &mhi_ctx->ch[chan_id];
2252
2253 mutex_lock(&ch->ch_lock);
2254
2255 if (ch->active_client) {
2256 mhi_log(MHI_MSG_ERROR,
2257 "Channel (%d) already opened by client\n", chan_id);
2258 rc = -EINVAL;
2259 goto exit;
2260 }
2261
2262 /* Initialize the channel, client and state information */
2263 *handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL);
2264 if (!(*handle_client)) {
2265 dev_err(&pdev->dev, "can not allocate mhi_dev memory\n");
2266 rc = -ENOMEM;
2267 goto exit;
2268 }
2269
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302270 rc = mhi_dev_alloc_evt_buf_evt_req(mhi_ctx, ch, NULL);
2271 if (rc)
Siva Kumar Akkireddi6c694d32018-06-01 20:22:30 +05302272 goto free_client;
Siva Kumar Akkireddi6c694d32018-06-01 20:22:30 +05302273
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002274 ch->active_client = (*handle_client);
2275 (*handle_client)->channel = ch;
2276 (*handle_client)->event_trigger = mhi_dev_client_cb_reason;
2277
2278 if (ch->state == MHI_DEV_CH_UNINT) {
2279 ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
2280 ch->state = MHI_DEV_CH_PENDING_START;
2281 } else if (ch->state == MHI_DEV_CH_CLOSED)
2282 ch->state = MHI_DEV_CH_STARTED;
2283 else if (ch->state == MHI_DEV_CH_STOPPED)
2284 ch->state = MHI_DEV_CH_PENDING_START;
2285
Siva Kumar Akkireddi6c694d32018-06-01 20:22:30 +05302286 goto exit;
2287
Siva Kumar Akkireddi6c694d32018-06-01 20:22:30 +05302288free_client:
2289 kfree(*handle_client);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302290 *handle_client = NULL;
2291
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002292exit:
2293 mutex_unlock(&ch->ch_lock);
2294 return rc;
2295}
2296EXPORT_SYMBOL(mhi_dev_open_channel);
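/*
 * Illustrative (hypothetical) client usage of the exported channel API -
 * my_event_cb, schedule_my_read_work and MY_CHANNEL_ID are placeholders,
 * not part of this driver:
 *
 *	static void my_event_cb(struct mhi_dev_client_cb_reason *reason)
 *	{
 *		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
 *			schedule_my_read_work(reason->ch_id);
 *	}
 *
 *	struct mhi_dev_client *client;
 *	int rc = mhi_dev_open_channel(MY_CHANNEL_ID, &client, my_event_cb);
 *	if (!rc && mhi_dev_channel_isempty(client))
 *		pr_debug("no TREs queued by host yet\n");
 */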
2297
2298int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
2299{
2300 struct mhi_dev_channel *ch;
2301 int rc;
2302
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302303 if (!handle) {
2304 mhi_log(MHI_MSG_ERROR, "Invalid channel access\n");
2305 return -EINVAL;
2306 }
2307
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002308 ch = handle->channel;
Siva Kumar Akkireddiad598872018-12-03 19:17:54 +05302309 if (!ch)
2310 return -EINVAL;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002311
2312 rc = ch->ring->rd_offset == ch->ring->wr_offset;
2313
2314 return rc;
2315}
2316EXPORT_SYMBOL(mhi_dev_channel_isempty);
2317
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302318bool mhi_dev_channel_has_pending_write(struct mhi_dev_client *handle)
2319{
2320 struct mhi_dev_channel *ch;
2321
2322 if (!handle) {
2323 mhi_log(MHI_MSG_ERROR, "Invalid channel access\n");
2324 		return false;
2325 }
2326
2327 ch = handle->channel;
2328 if (!ch)
2329 		return false;
2330
2331 return ch->pend_wr_count ? true : false;
2332}
2333EXPORT_SYMBOL(mhi_dev_channel_has_pending_write);
2334
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002335int mhi_dev_close_channel(struct mhi_dev_client *handle)
2336{
2337 struct mhi_dev_channel *ch;
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302338 int count = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002339 int rc = 0;
2340
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302341 if (!handle) {
2342 mhi_log(MHI_MSG_ERROR, "Invalid channel access:%d\n", -ENODEV);
2343 return -EINVAL;
2344 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002345 ch = handle->channel;
2346
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302347 do {
2348 if (ch->pend_wr_count) {
2349 usleep_range(MHI_DEV_CH_CLOSE_TIMEOUT_MIN,
2350 MHI_DEV_CH_CLOSE_TIMEOUT_MAX);
2351 } else
2352 break;
2353 } while (++count < MHI_DEV_CH_CLOSE_TIMEOUT_COUNT);
2354
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002355 mutex_lock(&ch->ch_lock);
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302356
2357 if (ch->pend_wr_count)
2358 mhi_log(MHI_MSG_ERROR, "%d writes pending for channel %d\n",
2359 ch->pend_wr_count, ch->ch_id);
2360
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002361 if (ch->state != MHI_DEV_CH_PENDING_START) {
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302362 		if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
2363 			!mhi_dev_channel_isempty(handle)) {
2364 mhi_log(MHI_MSG_DBG,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002365 "Trying to close an active channel (%d)\n",
2366 ch->ch_id);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002367 rc = -EAGAIN;
2368 goto exit;
2369 } else if (ch->tre_loc) {
2370 mhi_log(MHI_MSG_ERROR,
2371 "Trying to close channel (%d) when a TRE is active",
2372 ch->ch_id);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002373 rc = -EAGAIN;
2374 goto exit;
2375 }
2376 }
2377
2378 ch->state = MHI_DEV_CH_CLOSED;
2379 ch->active_client = NULL;
Siva Kumar Akkireddi6c694d32018-06-01 20:22:30 +05302380 kfree(ch->ereqs);
2381 kfree(ch->tr_events);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302382 ch->evt_buf_size = 0;
2383 ch->evt_req_size = 0;
Siva Kumar Akkireddi6c694d32018-06-01 20:22:30 +05302384 ch->ereqs = NULL;
2385 ch->tr_events = NULL;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002386 kfree(handle);
2387exit:
2388 mutex_unlock(&ch->ch_lock);
2389 return rc;
2390}
2391EXPORT_SYMBOL(mhi_dev_close_channel);
2392
2393static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch,
2394 struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el,
2395 uint32_t *chain)
2396{
2397 uint32_t td_done = 0;
2398
2399 /*
2400 * A full TRE worth of data was consumed.
2401 * Check if we are at a TD boundary.
2402 */
2403 if (ch->tre_bytes_left == 0) {
2404 if (el->tre.chain) {
2405 if (el->tre.ieob)
2406 mhi_dev_send_completion_event(ch,
2407 ring->rd_offset, el->tre.len,
2408 MHI_CMD_COMPL_CODE_EOB);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002409 *chain = 1;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002410 } else {
2411 if (el->tre.ieot)
2412 mhi_dev_send_completion_event(
2413 ch, ring->rd_offset, el->tre.len,
2414 MHI_CMD_COMPL_CODE_EOT);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002415 td_done = 1;
2416 *chain = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002417 }
2418 mhi_dev_ring_inc_index(ring, ring->rd_offset);
2419 ch->tre_bytes_left = 0;
2420 ch->tre_loc = 0;
2421 }
2422
2423 return td_done;
2424}
2425
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002426int mhi_dev_read_channel(struct mhi_req *mreq)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002427{
2428 struct mhi_dev_channel *ch;
2429 struct mhi_dev_ring *ring;
2430 union mhi_dev_ring_element_type *el;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002431 size_t bytes_to_read, addr_offset;
2432 uint64_t read_from_loc;
2433 ssize_t bytes_read = 0;
2434 uint32_t write_to_loc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002435 size_t usr_buf_remaining;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002436 int td_done = 0, rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002437 struct mhi_dev_client *handle_client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002438
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002439 if (!mreq) {
2440 mhi_log(MHI_MSG_ERROR, "invalid mhi request\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002441 return -ENXIO;
2442 }
2443
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002444 if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
2445 pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info);
2446 return -ENODEV;
2447 }
2448
2449 if (!mreq->client) {
2450 mhi_log(MHI_MSG_ERROR, "invalid mhi request\n");
2451 return -ENXIO;
2452 }
2453 handle_client = mreq->client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002454 ch = handle_client->channel;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002455 usr_buf_remaining = mreq->len;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002456 ring = ch->ring;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002457 mreq->chain = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002458
2459 mutex_lock(&ch->ch_lock);
2460
2461 do {
2462 el = &ring->ring_cache[ring->rd_offset];
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002463 mhi_log(MHI_MSG_VERBOSE, "evtptr : 0x%llx\n",
2464 el->tre.data_buf_ptr);
2465 mhi_log(MHI_MSG_VERBOSE, "evntlen : 0x%x, offset:%d\n",
2466 el->tre.len, ring->rd_offset);
2467
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002468 if (ch->tre_loc) {
2469 bytes_to_read = min(usr_buf_remaining,
2470 ch->tre_bytes_left);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002471 mreq->chain = 1;
2472 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002473 "remaining buffered data size %d\n",
2474 (int) ch->tre_bytes_left);
2475 } else {
2476 if (ring->rd_offset == ring->wr_offset) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002477 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002478 "nothing to read, returning\n");
2479 bytes_read = 0;
2480 goto exit;
2481 }
2482
2483 if (ch->state == MHI_DEV_CH_STOPPED) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002484 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002485 "channel (%d) already stopped\n",
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002486 mreq->chan);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002487 bytes_read = -1;
2488 goto exit;
2489 }
2490
2491 ch->tre_loc = el->tre.data_buf_ptr;
2492 ch->tre_size = el->tre.len;
2493 ch->tre_bytes_left = ch->tre_size;
2494
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002495 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002496 "user_buf_remaining %d, ch->tre_size %d\n",
2497 usr_buf_remaining, ch->tre_size);
2498 bytes_to_read = min(usr_buf_remaining, ch->tre_size);
2499 }
2500
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002501 bytes_read += bytes_to_read;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002502 addr_offset = ch->tre_size - ch->tre_bytes_left;
2503 read_from_loc = ch->tre_loc + addr_offset;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002504 write_to_loc = (uint32_t) mreq->buf +
2505 (mreq->len - usr_buf_remaining);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002506 ch->tre_bytes_left -= bytes_to_read;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002507 mreq->el = el;
Siva Kumar Akkireddifc10c672018-09-27 13:15:02 +05302508 mreq->transfer_len = bytes_to_read;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002509 mreq->rd_offset = ring->rd_offset;
2510 mhi_log(MHI_MSG_VERBOSE, "reading %d bytes from chan %d\n",
2511 bytes_to_read, mreq->chan);
2512 rc = mhi_transfer_host_to_device((void *) write_to_loc,
2513 read_from_loc, bytes_to_read, mhi_ctx, mreq);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002514 if (rc) {
2515 mhi_log(MHI_MSG_ERROR,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002516 "Error while reading chan (%d) rc %d\n",
2517 mreq->chan, rc);
2518 mutex_unlock(&ch->ch_lock);
2519 return rc;
2520 }
2521 usr_buf_remaining -= bytes_to_read;
2522
2523 if (mreq->mode == IPA_DMA_ASYNC) {
2524 ch->tre_bytes_left = 0;
2525 ch->tre_loc = 0;
2526 goto exit;
2527 } else {
2528 td_done = mhi_dev_check_tre_bytes_left(ch, ring,
2529 el, &mreq->chain);
2530 }
2531 } while (usr_buf_remaining && !td_done);
2532 if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) {
2533 ch->state = MHI_DEV_CH_STOPPED;
2534 rc = mhi_dev_process_stop_cmd(ring, mreq->chan, mhi_ctx);
2535 if (rc) {
2536 mhi_log(MHI_MSG_ERROR,
2537 "Error while stopping channel (%d)\n",
2538 mreq->chan);
2539 bytes_read = -EIO;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002540 }
2541 }
2542exit:
2543 mutex_unlock(&ch->ch_lock);
2544 return bytes_read;
2545}
2546EXPORT_SYMBOL(mhi_dev_read_channel);
2547
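/*
 * Advance the channel ring read offset past the remaining TREs of the
 * current transfer descriptor, i.e. until a TRE without the chain bit
 * (a TD boundary) has been consumed or the ring is empty.
 */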
2548static void skip_to_next_td(struct mhi_dev_channel *ch)
2549{
2550 struct mhi_dev_ring *ring = ch->ring;
2551 union mhi_dev_ring_element_type *el;
2552 uint32_t td_boundary_reached = 0;
2553
2554 ch->skip_td = 1;
2555 el = &ring->ring_cache[ring->rd_offset];
2556 while (ring->rd_offset != ring->wr_offset) {
2557 if (td_boundary_reached) {
2558 ch->skip_td = 0;
2559 break;
2560 }
2561 if (!el->tre.chain)
2562 td_boundary_reached = 1;
2563 mhi_dev_ring_inc_index(ring, ring->rd_offset);
2564 el = &ring->ring_cache[ring->rd_offset];
2565 }
2566}
2567
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002568int mhi_dev_write_channel(struct mhi_req *wreq)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002569{
2570 struct mhi_dev_channel *ch;
2571 struct mhi_dev_ring *ring;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002572 struct mhi_dev_client *handle_client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002573 union mhi_dev_ring_element_type *el;
2574 enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
2575 int rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002576 uint64_t skip_tres = 0, write_to_loc;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002577 uint32_t read_from_loc;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002578 size_t usr_buf_remaining;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002579 size_t usr_buf_offset = 0;
2580 size_t bytes_to_write = 0;
2581 size_t bytes_written = 0;
2582 uint32_t tre_len = 0, suspend_wait_timeout = 0;
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302583 bool async_wr_sched = false;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002584
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002585 if (!wreq || !wreq->client || !wreq->buf) {
2586 pr_err("%s: invalid parameters\n", __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002587 return -ENXIO;
2588 }
2589
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002590 if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
2591 pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info);
2592 return -ENODEV;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002593 }
2594
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002595 usr_buf_remaining = wreq->len;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002596 mutex_lock(&mhi_ctx->mhi_write_test);
2597
2598 if (atomic_read(&mhi_ctx->is_suspended)) {
2599 /*
2600 * Expected usage is when there is a write
2601 * to the MHI core -> notify SM.
2602 */
2603 rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP);
2604 if (rc) {
2605 pr_err("error sending core wakeup event\n");
2606 mutex_unlock(&mhi_ctx->mhi_write_test);
2607 return rc;
2608 }
2609 }
2610
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002611 while (atomic_read(&mhi_ctx->is_suspended) &&
Siddartha Mohanadoss1a1d8f02018-04-02 19:52:35 -07002612 suspend_wait_timeout < MHI_WAKEUP_TIMEOUT_CNT) {
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002613 /* wait for the suspend to finish */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002614 msleep(MHI_SUSPEND_MIN);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002615 suspend_wait_timeout++;
2616 }
Siddartha Mohanadoss1a1d8f02018-04-02 19:52:35 -07002617
2618 if (suspend_wait_timeout >= MHI_WAKEUP_TIMEOUT_CNT ||
2619 mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
2620 pr_err("Failed to wake up core\n");
2621 mutex_unlock(&mhi_ctx->mhi_write_test);
2622 return -ENODEV;
2623 }
2624
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002625 handle_client = wreq->client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002626 ch = handle_client->channel;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002627
2628 ring = ch->ring;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002629
2630 mutex_lock(&ch->ch_lock);
2631
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302632 ch->pend_wr_count++;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002633 if (ch->state == MHI_DEV_CH_STOPPED) {
2634 mhi_log(MHI_MSG_ERROR,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002635 "channel %d already stopped\n", wreq->chan);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002636 bytes_written = -1;
2637 goto exit;
2638 }
2639
2640 if (ch->state == MHI_DEV_CH_PENDING_STOP) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002641 if (mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx) < 0)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002642 bytes_written = -1;
2643 goto exit;
2644 }
2645
2646 if (ch->skip_td)
2647 skip_to_next_td(ch);
2648
2649 do {
2650 if (ring->rd_offset == ring->wr_offset) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002651 mhi_log(MHI_MSG_ERROR,
2652 "%s():rd & wr offsets are equal\n",
2653 __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002654 mhi_log(MHI_MSG_INFO, "No TREs available\n");
2655 break;
2656 }
2657
2658 el = &ring->ring_cache[ring->rd_offset];
2659 tre_len = el->tre.len;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002660 if (wreq->len > tre_len) {
2661 pr_err("%s(): rlen = %d, tlen = %d: client buf > tre len\n",
2662 __func__, wreq->len, tre_len);
2663 bytes_written = -ENOMEM;
2664 goto exit;
2665 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002666
2667 bytes_to_write = min(usr_buf_remaining, tre_len);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002668 usr_buf_offset = wreq->len - bytes_to_write;
2669 read_from_loc = (uint32_t) wreq->buf + usr_buf_offset;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002670 write_to_loc = el->tre.data_buf_ptr;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002671 wreq->rd_offset = ring->rd_offset;
2672 wreq->el = el;
Siva Kumar Akkireddifc10c672018-09-27 13:15:02 +05302673 wreq->transfer_len = bytes_to_write;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002674 rc = mhi_transfer_device_to_host(write_to_loc,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002675 (void *) read_from_loc,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002676 bytes_to_write,
2677 mhi_ctx, wreq);
2678 if (rc) {
2679 mhi_log(MHI_MSG_ERROR,
2680 "Error while writing chan (%d) rc %d\n",
2681 wreq->chan, rc);
2682 goto exit;
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302683 } else if (wreq->mode == DMA_ASYNC)
2684 async_wr_sched = true;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002685 bytes_written += bytes_to_write;
2686 usr_buf_remaining -= bytes_to_write;
2687
2688 if (usr_buf_remaining) {
2689 if (!el->tre.chain)
2690 code = MHI_CMD_COMPL_CODE_OVERFLOW;
2691 else if (el->tre.ieob)
2692 code = MHI_CMD_COMPL_CODE_EOB;
2693 } else {
2694 if (el->tre.chain)
2695 skip_tres = 1;
2696 code = MHI_CMD_COMPL_CODE_EOT;
2697 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002698 if (wreq->mode == IPA_DMA_SYNC) {
2699 rc = mhi_dev_send_completion_event(ch,
2700 ring->rd_offset, bytes_to_write, code);
2701 if (rc)
2702 mhi_log(MHI_MSG_VERBOSE,
2703 "err in snding cmpl evt ch:%d\n",
2704 wreq->chan);
2705 mhi_dev_ring_inc_index(ring, ring->rd_offset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002706 }
2707
2708 if (ch->state == MHI_DEV_CH_PENDING_STOP)
2709 break;
2710
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002711 } while (!skip_tres && usr_buf_remaining);
2712
2713 if (skip_tres)
2714 skip_to_next_td(ch);
2715
2716 if (ch->state == MHI_DEV_CH_PENDING_STOP) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002717 rc = mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002718 if (rc) {
2719 mhi_log(MHI_MSG_ERROR,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002720 "channel %d stop failed\n", wreq->chan);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002721 }
2722 }
2723exit:
Siva Kumar Akkireddi0f82ea32019-11-11 14:56:01 +05302724 if (wreq->mode == DMA_SYNC || !async_wr_sched)
2725 ch->pend_wr_count--;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002726 mutex_unlock(&ch->ch_lock);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002727 mutex_unlock(&mhi_ctx->mhi_write_test);
2728 return bytes_written;
2729}
2730EXPORT_SYMBOL(mhi_dev_write_channel);
2731
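/*
 * Recovery path for a device-side SYSERR: trigger an MSI on the BHI
 * interrupt vector so the host notices the device (if MSIs are enabled),
 * poll for the host to set the MHI reset bit, and finally mask MHI
 * interrupts so the state machine only moves once IPA is ready.
 */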
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05302732static int mhi_dev_recover(struct mhi_dev *mhi)
2733{
2734 int rc = 0;
2735 uint32_t syserr, max_cnt = 0, bhi_intvec = 0;
Siddartha Mohanadosse81dee92018-12-06 18:22:28 -08002736 u32 mhi_reset;
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05302737 enum mhi_dev_state state;
2738
2739 /* Check if MHI is in syserr */
2740 mhi_dev_mmio_masked_read(mhi, MHISTATUS,
2741 MHISTATUS_SYSERR_MASK,
2742 MHISTATUS_SYSERR_SHIFT, &syserr);
2743
2744 mhi_log(MHI_MSG_VERBOSE, "mhi_syserr = 0x%X\n", syserr);
2745 if (syserr) {
Siddartha Mohanadosse81dee92018-12-06 18:22:28 -08002746 		/* Read the current MHI state and reset status */
2747 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
2748 if (rc) {
2749 pr_err("%s: get mhi state failed\n", __func__);
2750 return rc;
2751 }
2752
2753 mhi_log(MHI_MSG_VERBOSE, "mhi_state = 0x%X, reset = %d\n",
2754 state, mhi_reset);
2755
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05302756 rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
2757 if (rc)
2758 return rc;
2759
2760 if (bhi_intvec != 0xffffffff) {
2761 /* Indicate the host that the device is ready */
2762 rc = ep_pcie_trigger_msi(mhi->phandle, bhi_intvec);
2763 if (rc) {
2764 pr_err("%s: error sending msi\n", __func__);
Siva Kumar Akkireddifb33bcb2019-03-01 19:42:50 +05302765 /*
2766 * MSIs are not enabled by host yet, set
2767 * mhistatus to syserr and exit.
2768 * Expected mhi host driver behaviour
2769 * is to check the device state and
2770 * issue a reset after it finds the device.
2771 */
2772 goto mask_intr;
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05302773 }
2774 }
2775
2776 /* Poll for the host to set the reset bit */
2777 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
2778 if (rc) {
2779 pr_err("%s: get mhi state failed\n", __func__);
2780 return rc;
2781 }
Siddartha Mohanadosse81dee92018-12-06 18:22:28 -08002782
2783 mhi_log(MHI_MSG_VERBOSE, "mhi_state = 0x%X, reset = %d\n",
2784 state, mhi_reset);
2785
2786 while (mhi_reset != 0x1 && max_cnt < MHI_SUSPEND_TIMEOUT) {
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05302787 /* Wait for Host to set the reset */
2788 msleep(MHI_SUSPEND_MIN);
2789 rc = mhi_dev_mmio_get_mhi_state(mhi, &state,
2790 &mhi_reset);
2791 if (rc) {
2792 pr_err("%s: get mhi state failed\n", __func__);
2793 return rc;
2794 }
2795 max_cnt++;
2796 }
2797
2798 if (!mhi_reset) {
2799 mhi_log(MHI_MSG_VERBOSE, "Host failed to set reset\n");
2800 return -EINVAL;
2801 }
2802 }
Siva Kumar Akkireddifb33bcb2019-03-01 19:42:50 +05302803mask_intr:
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05302804 /*
2805 * Now mask the interrupts so that the state machine moves
2806 * only after IPA is ready
2807 */
2808 mhi_dev_mmio_mask_interrupts(mhi);
2809 return 0;
2810}
2811
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002812static void mhi_dev_enable(struct work_struct *work)
2813{
2814 int rc = 0;
2815 struct ep_pcie_msi_config msi_cfg;
2816 struct mhi_dev *mhi = container_of(work,
2817 struct mhi_dev, ring_init_cb_work);
Siddartha Mohanadosse81dee92018-12-06 18:22:28 -08002818 u32 mhi_reset;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002819 enum mhi_dev_state state;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002820 uint32_t max_cnt = 0, bhi_intvec = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002821
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002822 if (mhi->use_ipa) {
2823 rc = ipa_dma_init();
2824 if (rc) {
2825 pr_err("ipa dma init failed\n");
2826 return;
2827 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002828
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002829 rc = ipa_dma_enable();
2830 if (rc) {
2831 pr_err("ipa enable failed\n");
2832 return;
2833 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002834 }
2835
2836 rc = mhi_dev_ring_init(mhi);
2837 if (rc) {
2838 pr_err("MHI dev ring init failed\n");
2839 return;
2840 }
2841
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002842 rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
2843 if (rc)
2844 return;
2845
2846 if (bhi_intvec != 0xffffffff) {
2847 /* Indicate the host that the device is ready */
2848 rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg);
2849 if (!rc) {
2850 rc = ep_pcie_trigger_msi(mhi_ctx->phandle, bhi_intvec);
2851 if (rc) {
2852 pr_err("%s: error sending msi\n", __func__);
2853 return;
2854 }
2855 } else {
2856 			pr_err("MHI: error getting msi config\n");
2857 }
2858 }
2859
2860 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002861 if (rc) {
2862 pr_err("%s: get mhi state failed\n", __func__);
2863 return;
2864 }
2865
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002866 while (state != MHI_DEV_M0_STATE && max_cnt < MHI_SUSPEND_TIMEOUT) {
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002867 /* Wait for Host to set the M0 state */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002868 msleep(MHI_SUSPEND_MIN);
2869 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002870 if (rc) {
2871 pr_err("%s: get mhi state failed\n", __func__);
2872 return;
2873 }
2874 max_cnt++;
2875 }
2876
2877 mhi_log(MHI_MSG_INFO, "state:%d\n", state);
2878
2879 if (state == MHI_DEV_M0_STATE) {
2880 rc = mhi_dev_cache_host_cfg(mhi);
2881 if (rc) {
2882 pr_err("Failed to cache the host config\n");
2883 return;
2884 }
2885
2886 rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
2887 if (rc) {
2888 pr_err("%s: env setting failed\n", __func__);
2889 return;
2890 }
2891 } else {
2892 pr_err("MHI device failed to enter M0\n");
2893 return;
2894 }
2895
2896 rc = mhi_hwc_init(mhi_ctx);
2897 if (rc) {
2898 pr_err("error during hwc_init\n");
2899 return;
2900 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002901
Rama Krishna Phani A69494ec2018-06-05 19:15:56 +05302902 if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
2903 mhi_ctx->mhi_int_en = true;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002904 enable_irq(mhi_ctx->mhi_irq);
Rama Krishna Phani A69494ec2018-06-05 19:15:56 +05302905 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002906
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002907 mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05302908
2909 	/* Enable MHI dev network stack interface */
2910 rc = mhi_dev_net_interface_init();
2911 if (rc)
2912 pr_err("%s Failed to initialize mhi_dev_net iface\n", __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002913}
2914
2915static void mhi_ring_init_cb(void *data)
2916{
2917 struct mhi_dev *mhi = data;
2918
2919 if (!mhi) {
2920 pr_err("Invalid MHI ctx\n");
2921 return;
2922 }
2923
2924 queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work);
2925}
2926
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002927int mhi_register_state_cb(void (*mhi_state_cb)
2928 (struct mhi_dev_client_cb_data *cb_data),
2929 void *data, enum mhi_client_channel channel)
2930{
2931 struct mhi_dev_ready_cb_info *cb_info = NULL;
2932
2933 if (!mhi_ctx) {
2934 pr_err("MHI device not ready\n");
2935 return -ENXIO;
2936 }
2937
Siddartha Mohanadoss32b02492020-03-19 22:12:00 -07002938 if (channel >= MHI_MAX_SOFTWARE_CHANNELS) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002939 pr_err("Invalid channel :%d\n", channel);
2940 return -EINVAL;
2941 }
2942
2943 mutex_lock(&mhi_ctx->mhi_lock);
2944 cb_info = kmalloc(sizeof(struct mhi_dev_ready_cb_info), GFP_KERNEL);
2945 if (!cb_info) {
2946 mutex_unlock(&mhi_ctx->mhi_lock);
2947 return -ENOMEM;
2948 }
2949
2950 cb_info->cb = mhi_state_cb;
2951 cb_info->cb_data.user_data = data;
2952 cb_info->cb_data.channel = channel;
2953
2954 list_add_tail(&cb_info->list, &mhi_ctx->client_cb_list);
2955
2956 /**
2957 * If channel is open during registration, no callback is issued.
2958 * Instead return -EEXIST to notify the client. Clients request
2959 * is added to the list to notify future state change notification.
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05302960 * Channel struct may not be allocated yet if this function is called
2961 * early during boot - add an explicit check for non-null "ch".
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002962 */
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05302963 if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002964 mutex_unlock(&mhi_ctx->mhi_lock);
2965 return -EEXIST;
2966 }
2967
2968 mutex_unlock(&mhi_ctx->mhi_lock);
2969
2970 return 0;
2971}
2972EXPORT_SYMBOL(mhi_register_state_cb);
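
/*
 * Illustrative usage sketch (not part of the driver and kept out of the
 * build): a client layer could register for channel state notifications as
 * shown below. The ctrl_info field read in the callback and the
 * MHI_STATE_CONNECTED value are assumptions based on how
 * struct mhi_dev_client_cb_data and enum mhi_ctrl_info are used elsewhere
 * in this driver; the call itself follows mhi_register_state_cb() above.
 */
#if 0
static void sample_client_state_cb(struct mhi_dev_client_cb_data *cb_data)
{
	/* cb_data carries the channel id and the user_data passed at register */
	if (cb_data->ctrl_info == MHI_STATE_CONNECTED)
		pr_debug("MHI channel %d connected\n", cb_data->channel);
}

static int sample_client_init(void)
{
	int rc;

	rc = mhi_register_state_cb(sample_client_state_cb, NULL,
					MHI_CLIENT_QMI_OUT);
	/* -EEXIST means the channel is already started; treat it as success */
	if (rc == -EEXIST)
		rc = 0;

	return rc;
}
#endif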
2973
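/*
 * Cache the latest control/channel state for the given index and, for the
 * QMI channels, notify the legacy UCI control node so existing userspace
 * clients pick up the change.
 */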
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002974static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info)
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002975{
2976 struct mhi_dev_client_cb_reason reason;
2977
Siddartha Mohanadoss32b02492020-03-19 22:12:00 -07002978 /* Currently no clients register for HW channel notify */
2979 if (uevent_idx >= MHI_MAX_SOFTWARE_CHANNELS)
2980 return;
2981
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002982 if (uevent_idx == MHI_DEV_UEVENT_CTRL)
2983 mhi_ctx->ctrl_info = info;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002984
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002985 channel_state_info[uevent_idx].ctrl_info = info;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002986
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002987 if (uevent_idx == MHI_CLIENT_QMI_OUT ||
2988 uevent_idx == MHI_CLIENT_QMI_IN) {
2989 		/* Legacy control update path for the QTI client */
2990 reason.reason = MHI_DEV_CTRL_UPDATE;
2991 uci_ctrl_update(&reason);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002992 }
2993
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002994}
2995
2996int mhi_ctrl_state_info(uint32_t idx, uint32_t *info)
2997{
2998 if (idx == MHI_DEV_UEVENT_CTRL)
2999 *info = mhi_ctx->ctrl_info;
3000 	else if (idx < MHI_MAX_SOFTWARE_CHANNELS)
3001 		*info = channel_state_info[idx].ctrl_info;
3002 	else
3003 		return -EINVAL;
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07003005
3006 	mhi_log(MHI_MSG_VERBOSE, "idx:%d, ctrl:%d\n", idx, *info);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003007
3008 return 0;
3009}
3010EXPORT_SYMBOL(mhi_ctrl_state_info);
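
/*
 * Illustrative usage sketch (not part of the driver and kept out of the
 * build): callers can poll the control or per-channel state before
 * attempting I/O. Index MHI_DEV_UEVENT_CTRL returns the overall device
 * state; any index below MHI_MAX_SOFTWARE_CHANNELS returns that channel's
 * state. MHI_STATE_CONNECTED is assumed from the mhi_ctrl_info enum used
 * by this driver.
 */
#if 0
static bool sample_channel_ready(uint32_t ch_id)
{
	uint32_t info = 0;

	if (mhi_ctrl_state_info(ch_id, &info))
		return false;

	return info == MHI_STATE_CONNECTED;
}
#endif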
3011
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003012static int get_device_tree_data(struct platform_device *pdev)
3013{
3014 struct mhi_dev *mhi;
3015 int rc = 0;
3016 struct resource *res_mem = NULL;
3017
3018 mhi = devm_kzalloc(&pdev->dev,
3019 sizeof(struct mhi_dev), GFP_KERNEL);
3020 if (!mhi)
3021 return -ENOMEM;
3022
3023 mhi->pdev = pdev;
3024 mhi->dev = &pdev->dev;
3025 res_mem = platform_get_resource_byname(pdev,
3026 IORESOURCE_MEM, "mhi_mmio_base");
3027 if (!res_mem) {
3028 rc = -EINVAL;
3029 pr_err("Request MHI MMIO physical memory region failed\n");
3030 return rc;
3031 }
3032
3033 mhi->mmio_base_pa_addr = res_mem->start;
3034 mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
3035 if (!mhi->mmio_base_addr) {
3036 pr_err("Failed to IO map MMIO registers.\n");
3037 rc = -EINVAL;
3038 return rc;
3039 }
3040
3041 res_mem = platform_get_resource_byname(pdev,
3042 IORESOURCE_MEM, "ipa_uc_mbox_crdb");
3043 if (!res_mem) {
3044 rc = -EINVAL;
3045 pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
3046 return rc;
3047 }
3048
3049 mhi->ipa_uc_mbox_crdb = res_mem->start;
3050
3051 res_mem = platform_get_resource_byname(pdev,
3052 IORESOURCE_MEM, "ipa_uc_mbox_erdb");
3053 if (!res_mem) {
3054 rc = -EINVAL;
3055 pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
3056 return rc;
3057 }
3058
3059 mhi->ipa_uc_mbox_erdb = res_mem->start;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003060 mhi_ctx = mhi;
3061
3062 rc = of_property_read_u32((&pdev->dev)->of_node,
3063 "qcom,mhi-ifc-id",
3064 &mhi_ctx->ifc_id);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003065 if (rc) {
3066 pr_err("qcom,mhi-ifc-id does not exist.\n");
3067 return rc;
3068 }
3069
3070 rc = of_property_read_u32((&pdev->dev)->of_node,
3071 "qcom,mhi-ep-msi",
3072 &mhi_ctx->mhi_ep_msi_num);
3073 if (rc) {
3074 pr_err("qcom,mhi-ep-msi does not exist.\n");
3075 return rc;
3076 }
3077
3078 rc = of_property_read_u32((&pdev->dev)->of_node,
3079 "qcom,mhi-version",
3080 &mhi_ctx->mhi_version);
3081 if (rc) {
3082 pr_err("qcom,mhi-version does not exist.\n");
3083 return rc;
3084 }
3085
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003086 mhi_ctx->use_ipa = of_property_read_bool((&pdev->dev)->of_node,
3087 "qcom,use-ipa-software-channel");
3088
3089 mhi_ctx->config_iatu = of_property_read_bool((&pdev->dev)->of_node,
3090 "qcom,mhi-config-iatu");
3091
3092 if (mhi_ctx->config_iatu) {
3093 rc = of_property_read_u32((&pdev->dev)->of_node,
3094 "qcom,mhi-local-pa-base",
3095 &mhi_ctx->device_local_pa_base);
3096 if (rc) {
3097 pr_err("qcom,mhi-local-pa-base does not exist\n");
3098 return rc;
3099 }
3100 }
3101
3102 mhi_ctx->mhi_int = of_property_read_bool((&pdev->dev)->of_node,
3103 "qcom,mhi-interrupt");
3104
3105 	if (mhi->config_iatu || mhi->mhi_int) {
3106 mhi->mhi_irq = platform_get_irq_byname(pdev, "mhi-device-inta");
3107 if (mhi->mhi_irq < 0) {
3108 pr_err("Invalid MHI device interrupt\n");
3109 rc = mhi->mhi_irq;
3110 return rc;
3111 }
3112 }
3113
3114 device_init_wakeup(mhi->dev, true);
3115 /* MHI device will be woken up from PCIe event */
3116 device_set_wakeup_capable(mhi->dev, false);
3117 /* Hold a wakelock until completion of M0 */
3118 pm_stay_awake(mhi->dev);
3119 atomic_set(&mhi->mhi_dev_wake, 1);
3120
3121 mhi_log(MHI_MSG_VERBOSE, "acquiring wakelock\n");
3122
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003123 return 0;
3124}
3125
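/*
 * Illustrative device-tree sketch (an assumption, not taken from any real
 * target): the property, register and interrupt names below are exactly
 * the ones parsed by get_device_tree_data() above, while all addresses and
 * values are placeholders only.
 *
 *	mhi_dev: qcom,msm-mhi-dev {
 *		compatible = "qcom,msm-mhi-dev";
 *		reg = <0xfc527000 0x1000>,
 *		      <0xfd4fa000 0x1000>,
 *		      <0xfd4fb000 0x1000>;
 *		reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb",
 *				"ipa_uc_mbox_erdb";
 *		qcom,mhi-ifc-id = <0x030017cb>;
 *		qcom,mhi-ep-msi = <1>;
 *		qcom,mhi-version = <0x1000000>;
 *		qcom,use-ipa-software-channel;
 *		qcom,mhi-config-iatu;
 *		qcom,mhi-local-pa-base = <0x40000000>;
 *		qcom,mhi-interrupt;
 *		interrupt-names = "mhi-device-inta";
 *	};
 */
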
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003126static int mhi_deinit(struct mhi_dev *mhi)
3127{
3128 int rc = 0, i = 0, ring_id = 0;
3129 struct mhi_dev_ring *ring;
3130 struct platform_device *pdev = mhi->pdev;
3131
3132 ring_id = mhi->cfg.channels + mhi->cfg.event_rings + 1;
3133
3134 for (i = 0; i < ring_id; i++) {
3135 ring = &mhi->ring[i];
3136 if (ring->state == RING_STATE_UINT)
3137 continue;
3138
3139 dma_free_coherent(mhi->dev, ring->ring_size *
3140 sizeof(union mhi_dev_ring_element_type),
3141 ring->ring_cache,
3142 ring->ring_cache_dma_handle);
3143 }
3144
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003145 devm_kfree(&pdev->dev, mhi->mmio_backup);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003146 devm_kfree(&pdev->dev, mhi->ring);
3147
3148 mhi_dev_sm_exit(mhi);
3149
3150 mhi->mmio_initialized = false;
3151
3152 return rc;
3153}
3154
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003155static int mhi_init(struct mhi_dev *mhi)
3156{
3157 int rc = 0, i = 0;
3158 struct platform_device *pdev = mhi->pdev;
3159
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003160 rc = mhi_dev_mmio_init(mhi);
3161 if (rc) {
3162 pr_err("Failed to update the MMIO init\n");
3163 return rc;
3164 }
3165
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003166 mhi->ring = devm_kzalloc(&pdev->dev,
3167 (sizeof(struct mhi_dev_ring) *
3168 (mhi->cfg.channels + mhi->cfg.event_rings + 1)),
3169 GFP_KERNEL);
3170 if (!mhi->ring)
3171 return -ENOMEM;
3172
Siva Kumar Akkireddiad598872018-12-03 19:17:54 +05303173 /*
3174 * mhi_init is also called during device reset, in
3175 * which case channel mem will already be allocated.
3176 */
3177 if (!mhi->ch) {
3178 mhi->ch = devm_kzalloc(&pdev->dev,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003179 (sizeof(struct mhi_dev_channel) *
3180 (mhi->cfg.channels)), GFP_KERNEL);
Siva Kumar Akkireddiad598872018-12-03 19:17:54 +05303181 if (!mhi->ch)
3182 return -ENOMEM;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003183
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05303184 for (i = 0; i < mhi->cfg.channels; i++) {
3185 mhi->ch[i].ch_id = i;
Siva Kumar Akkireddiad598872018-12-03 19:17:54 +05303186 mutex_init(&mhi->ch[i].ch_lock);
Siva Kumar Akkireddie7d055b2018-10-11 22:27:55 +05303187 }
Siva Kumar Akkireddiad598872018-12-03 19:17:54 +05303188 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003189
3190 spin_lock_init(&mhi->lock);
3191 mhi->mmio_backup = devm_kzalloc(&pdev->dev,
3192 MHI_DEV_MMIO_RANGE, GFP_KERNEL);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003193 if (!mhi->mmio_backup)
3194 return -ENOMEM;
3195
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003196 return 0;
3197}
3198
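/*
 * Re-initialization path: taken when the PCIe link comes back up after a
 * link-down or device reset while the driver is already probed. It
 * re-programs the MMIO region, re-registers for EP PCIe and IPA ready
 * callbacks, restarts the MHI state machine and advertises readiness to
 * the host again.
 */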
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003199static int mhi_dev_resume_mmio_mhi_reinit(struct mhi_dev *mhi_ctx)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003200{
3201 int rc = 0;
3202
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003203 mutex_lock(&mhi_ctx->mhi_lock);
3204 if (atomic_read(&mhi_ctx->re_init_done)) {
3205 mhi_log(MHI_MSG_INFO, "Re_init done, return\n");
3206 mutex_unlock(&mhi_ctx->mhi_lock);
3207 return 0;
3208 }
3209
3210 rc = mhi_init(mhi_ctx);
3211 if (rc) {
3212 pr_err("Error initializing MHI MMIO with %d\n", rc);
3213 goto fail;
3214 }
3215
3216 mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
3217 EP_PCIE_EVENT_PM_D3_COLD |
3218 EP_PCIE_EVENT_PM_D0 |
3219 EP_PCIE_EVENT_PM_RST_DEAST |
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003220 EP_PCIE_EVENT_LINKDOWN;
Siva Kumar Akkireddidd7c6ed2018-09-07 15:04:17 +05303221 if (!mhi_ctx->mhi_int)
3222 mhi_ctx->event_reg.events |= EP_PCIE_EVENT_MHI_A7;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003223 mhi_ctx->event_reg.user = mhi_ctx;
3224 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
3225 mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
3226
3227 rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
3228 if (rc) {
3229 pr_err("Failed to register for events from PCIe\n");
3230 goto fail;
3231 }
3232
3233 rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
3234 if (rc < 0) {
3235 if (rc == -EEXIST) {
3236 mhi_ring_init_cb(mhi_ctx);
3237 } else {
3238 pr_err("Error calling IPA cb with %d\n", rc);
3239 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003240 }
3241 }
3242
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003243 /* Invoke MHI SM when device is in RESET state */
3244 rc = mhi_dev_sm_init(mhi_ctx);
3245 if (rc) {
3246 pr_err("%s: Error during SM init\n", __func__);
3247 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003248 }
3249
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003250 /* set the env before setting the ready bit */
3251 rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE);
3252 if (rc) {
3253 pr_err("%s: env setting failed\n", __func__);
3254 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003255 }
3256
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003257 /* All set, notify the host */
3258 rc = mhi_dev_sm_set_ready();
3259 if (rc) {
3260 pr_err("%s: unable to set ready bit\n", __func__);
3261 goto fail;
3262 }
3263
3264 atomic_set(&mhi_ctx->is_suspended, 0);
3265fail:
3266 atomic_set(&mhi_ctx->re_init_done, 1);
3267 mutex_unlock(&mhi_ctx->mhi_lock);
3268 return rc;
3269}
3270
3271static void mhi_dev_reinit(struct work_struct *work)
3272{
3273 struct mhi_dev *mhi_ctx = container_of(work,
3274 struct mhi_dev, re_init);
3275 enum ep_pcie_link_status link_state;
3276 int rc = 0;
3277
3278 link_state = ep_pcie_get_linkstatus(mhi_ctx->phandle);
3279 if (link_state == EP_PCIE_LINK_ENABLED) {
3280 /* PCIe link is up with BME set */
3281 rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx);
3282 if (rc) {
3283 			pr_err("MHI MMIO re-initialization failed\n");
3284 return;
3285 }
3286 }
3287
3288 mhi_log(MHI_MSG_VERBOSE, "Wait for PCIe linkup\n");
3289}
3290
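/*
 * First-time bring-up path: runs once when the PCIe link is up with BME
 * set. It allocates the work queues and local DMA buffers, recovers any
 * existing MHI state, programs MHIVER, registers for EP PCIe and IPA ready
 * callbacks, starts the MHI state machine and finally sets the ready bit
 * for the host.
 */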
3291static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx)
3292{
3293 struct platform_device *pdev;
3294 int rc = 0;
3295
Siva Kumar Akkireddidaedab72018-12-29 21:17:01 +05303296 /*
3297 * There could be multiple calls to this function if device gets
3298 * multiple link-up events (bme irqs).
3299 */
3300 if (mhi_ctx->init_done) {
3301 mhi_log(MHI_MSG_INFO, "mhi init already done, returning\n");
3302 return 0;
3303 }
3304
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003305 pdev = mhi_ctx->pdev;
3306
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003307 INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);
3308
3309 mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq",
3310 WQ_HIGHPRI, 0);
3311 if (!mhi_ctx->pending_ring_wq) {
3312 rc = -ENOMEM;
3313 return rc;
3314 }
3315
3316 INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending);
3317
3318 INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable);
3319
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003320 INIT_WORK(&mhi_ctx->re_init, mhi_dev_reinit);
3321
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003322 mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq",
3323 WQ_HIGHPRI, 0);
3324 if (!mhi_ctx->ring_init_wq) {
3325 rc = -ENOMEM;
3326 return rc;
3327 }
3328
3329 INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
3330 INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003331 mutex_init(&mhi_ctx->mhi_event_lock);
3332 mutex_init(&mhi_ctx->mhi_write_test);
3333
Rama Krishna Phani A090bbd12018-10-16 20:36:34 +05303334 mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
3335 if (!mhi_ctx->phandle) {
3336 pr_err("PCIe driver get handle failed.\n");
3337 return -EINVAL;
3338 }
3339
3340 rc = mhi_dev_recover(mhi_ctx);
3341 if (rc) {
3342 pr_err("%s: get mhi state failed\n", __func__);
3343 return rc;
3344 }
3345
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003346 rc = mhi_init(mhi_ctx);
3347 if (rc)
3348 return rc;
3349
3350 mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev,
3351 (TRB_MAX_DATA_SIZE * 4),
3352 &mhi_ctx->cache_dma_handle, GFP_KERNEL);
3353 if (!mhi_ctx->dma_cache)
3354 return -ENOMEM;
3355
3356 mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev,
3357 (TRB_MAX_DATA_SIZE * 4),
3358 &mhi_ctx->read_dma_handle,
3359 GFP_KERNEL);
3360 if (!mhi_ctx->read_handle)
3361 return -ENOMEM;
3362
3363 mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev,
3364 (TRB_MAX_DATA_SIZE * 24),
3365 &mhi_ctx->write_dma_handle,
3366 GFP_KERNEL);
3367 if (!mhi_ctx->write_handle)
3368 return -ENOMEM;
3369
3370 rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version);
3371 if (rc) {
3372 pr_err("Failed to update the MHI version\n");
3373 return rc;
3374 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003375 mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
3376 EP_PCIE_EVENT_PM_D3_COLD |
3377 EP_PCIE_EVENT_PM_D0 |
3378 EP_PCIE_EVENT_PM_RST_DEAST |
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003379 EP_PCIE_EVENT_LINKDOWN;
Siva Kumar Akkireddidd7c6ed2018-09-07 15:04:17 +05303380 if (!mhi_ctx->mhi_int)
3381 mhi_ctx->event_reg.events |= EP_PCIE_EVENT_MHI_A7;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003382 mhi_ctx->event_reg.user = mhi_ctx;
3383 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
3384 mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
3385
3386 rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
3387 if (rc) {
3388 pr_err("Failed to register for events from PCIe\n");
3389 return rc;
3390 }
3391
3392 	pr_debug("Registering with IPA\n");
3393
3394 rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
3395 if (rc < 0) {
3396 if (rc == -EEXIST) {
3397 mhi_ring_init_cb(mhi_ctx);
3398 } else {
3399 pr_err("Error calling IPA cb with %d\n", rc);
3400 return rc;
3401 }
3402 }
3403
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003404 /* Invoke MHI SM when device is in RESET state */
3405 rc = mhi_dev_sm_init(mhi_ctx);
3406 if (rc) {
3407 pr_err("%s: Error during SM init\n", __func__);
3408 return rc;
3409 }
3410
3411 /* set the env before setting the ready bit */
3412 rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE);
3413 if (rc) {
3414 pr_err("%s: env setting failed\n", __func__);
3415 return rc;
3416 }
3417
3418 /* All set, notify the host */
3419 mhi_dev_sm_set_ready();
3420
3421 if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
3422 rc = devm_request_irq(&pdev->dev, mhi_ctx->mhi_irq, mhi_dev_isr,
3423 IRQF_TRIGGER_HIGH, "mhi_isr", mhi_ctx);
3424 if (rc) {
3425 dev_err(&pdev->dev, "request mhi irq failed %d\n", rc);
3426 return -EINVAL;
3427 }
3428
3429 disable_irq(mhi_ctx->mhi_irq);
3430 }
3431
Siva Kumar Akkireddidaedab72018-12-29 21:17:01 +05303432 mhi_ctx->init_done = true;
3433
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003434 return 0;
3435}
3436
3437static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify)
3438{
3439 if (!notify || !notify->user) {
3440 pr_err("Null argument for notify\n");
3441 return;
3442 }
3443
3444 mhi_ctx = notify->user;
3445 mhi_dev_pcie_notify_event = notify->options;
3446 mhi_log(MHI_MSG_INFO,
3447 "PCIe event=0x%x\n", notify->options);
3448 queue_work(mhi_ctx->pcie_event_wq, &mhi_ctx->pcie_event);
3449}
3450
3451static void mhi_dev_pcie_handle_event(struct work_struct *work)
3452{
3453 struct mhi_dev *mhi_ctx = container_of(work, struct mhi_dev,
3454 pcie_event);
3455 int rc = 0;
3456
3457 if (mhi_dev_pcie_notify_event == MHI_INIT) {
3458 rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx);
3459 if (rc) {
3460 pr_err("Error during MHI device initialization\n");
3461 return;
3462 }
3463 } else if (mhi_dev_pcie_notify_event == MHI_REINIT) {
3464 rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx);
3465 if (rc) {
3466 pr_err("Error during MHI device re-initialization\n");
3467 return;
3468 }
3469 }
3470}
3471
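/*
 * Probe: parse the MHI device-tree node, create the IPC logging context
 * and bring up the UCI layer, then either run the full MMIO/MHI init right
 * away (PCIe link already up) or register a LINKUP callback with the EP
 * PCIe driver and defer initialization to mhi_dev_pcie_handle_event().
 */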
3472static int mhi_dev_probe(struct platform_device *pdev)
3473{
3474 int rc = 0;
3475
3476 if (pdev->dev.of_node) {
3477 rc = get_device_tree_data(pdev);
3478 if (rc) {
3479 pr_err("Error reading MHI Dev DT\n");
3480 return rc;
3481 }
3482 mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
3483 "mhi", 0);
3484 if (mhi_ipc_log == NULL) {
3485 dev_err(&pdev->dev,
3486 "Failed to create IPC logging context\n");
3487 }
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05303488 /*
3489 * The below list and mutex should be initialized
3490 * before calling mhi_uci_init to avoid crash in
3491 * mhi_register_state_cb when accessing these.
3492 */
3493 INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
3494 mutex_init(&mhi_ctx->mhi_lock);
3495
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003496 mhi_uci_init();
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07003497 mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
3498 MHI_STATE_CONFIGURED);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003499 }
3500
3501 INIT_WORK(&mhi_ctx->pcie_event, mhi_dev_pcie_handle_event);
3502 mhi_ctx->pcie_event_wq = alloc_workqueue("mhi_dev_pcie_event_wq",
3503 WQ_HIGHPRI, 0);
3504 if (!mhi_ctx->pcie_event_wq) {
3505 		pr_err("Failed to allocate PCIe event workqueue\n");
3506 rc = -ENOMEM;
3507 return rc;
3508 }
3509
3510 mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
3511 if (mhi_ctx->phandle) {
3512 /* PCIe link is already up */
3513 rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx);
3514 if (rc) {
3515 pr_err("Error during MHI device initialization\n");
3516 return rc;
3517 }
3518 } else {
3519 pr_debug("Register a PCIe callback\n");
3520 mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP;
3521 mhi_ctx->event_reg.user = mhi_ctx;
3522 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
3523 mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up;
3524 mhi_ctx->event_reg.options = MHI_INIT;
3525
3526 rc = ep_pcie_register_event(mhi_ctx->phandle,
3527 &mhi_ctx->event_reg);
3528 if (rc) {
3529 pr_err("Failed to register for events from PCIe\n");
3530 return rc;
3531 }
3532 }
3533
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003534 return 0;
3535}
3536
3537static int mhi_dev_remove(struct platform_device *pdev)
3538{
3539 platform_set_drvdata(pdev, NULL);
3540
3541 return 0;
3542}
3543
3544static const struct of_device_id mhi_dev_match_table[] = {
3545 { .compatible = "qcom,msm-mhi-dev" },
3546 {}
3547};
3548
3549static struct platform_driver mhi_dev_driver = {
3550 .driver = {
3551 .name = "qcom,msm-mhi-dev",
3552 .of_match_table = mhi_dev_match_table,
3553 },
3554 .probe = mhi_dev_probe,
3555 .remove = mhi_dev_remove,
3556};
3557
3558module_param(mhi_msg_lvl, uint, 0644);
3559module_param(mhi_ipc_msg_lvl, uint, 0644);
3560
3561MODULE_PARM_DESC(mhi_msg_lvl, "MHI kernel log verbosity level");
3562MODULE_PARM_DESC(mhi_ipc_msg_lvl, "MHI IPC log verbosity level");
3563
3564static int __init mhi_dev_init(void)
3565{
3566 return platform_driver_register(&mhi_dev_driver);
3567}
Siva Kumar Akkireddi016b09f2018-10-25 21:30:37 +05303568subsys_initcall(mhi_dev_init);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003569
3570static void __exit mhi_dev_exit(void)
3571{
3572 platform_driver_unregister(&mhi_dev_driver);
3573}
3574module_exit(mhi_dev_exit);
3575
3576MODULE_DESCRIPTION("MHI device driver");
3577MODULE_LICENSE("GPL v2");