/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/msm_ep_pcie.h>
#include <linux/ipa_mhi.h>
#include <linux/vmalloc.h>

#include "mhi.h"
#include "mhi_hwio.h"
#include "mhi_sm.h"

/* Wait time on the device for the host to set M0 state */
#define MHI_DEV_M0_MAX_CNT 30
/* Wait time before suspend/resume is complete */
#define MHI_SUSPEND_MIN 100
#define MHI_SUSPEND_TIMEOUT 600
#define MHI_WAKEUP_TIMEOUT_CNT 20
#define MHI_MASK_CH_EV_LEN 32
#define MHI_RING_CMD_ID 0
#define MHI_RING_PRIMARY_EVT_ID 1
#define MHI_1K_SIZE 0x1000
/* Updated spec: HW event rings start at NER - 2 and end at NER - 1 */
#define MHI_HW_ACC_EVT_RING_START 2
#define MHI_HW_ACC_EVT_RING_END 1

#define MHI_HOST_REGION_NUM 2

#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1
#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2

#define HOST_ADDR(lsb, msb) ((lsb) | ((uint64_t)(msb) << 32))
#define HOST_ADDR_LSB(addr) (addr & 0xFFFFFFFF)
#define HOST_ADDR_MSB(addr) ((addr >> 32) & 0xFFFFFFFF)

#define MHI_IPC_LOG_PAGES (100)
#define MHI_REGLEN 0x100
#define MHI_INIT 0
#define MHI_REINIT 1

#define TR_RING_ELEMENT_SZ sizeof(struct mhi_dev_transfer_ring_element)
#define RING_ELEMENT_TYPE_SZ sizeof(union mhi_dev_ring_element_type)

enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
void *mhi_ipc_log;

static struct mhi_dev *mhi_ctx;
static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
	unsigned long data);
static void mhi_ring_init_cb(void *user_data);
static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info);
static int mhi_deinit(struct mhi_dev *mhi);
static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify);
static int mhi_dev_pcie_notify_event;
static void mhi_dev_transfer_completion_cb(void *mreq);
static struct mhi_dev_uevent_info channel_state_info[MHI_MAX_CHANNELS];

/*
 * mhi_dev_ring_cache_completion_cb() - Callback invoked by the IPA driver
 * when caching of a ring element is complete.
 *
 * @req:	ring cache request
 */
static void mhi_dev_ring_cache_completion_cb(void *req)
{
	struct ring_cache_req *ring_req = NULL;

	if (req) {
		ring_req = (struct ring_cache_req *)req;
	} else {
		pr_err("%s(): ring cache req data is NULL\n", __func__);
		return;
	}
	complete(ring_req->done);
}

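/*
 * Read a block of host memory into device memory over IPA DMA. The copy
 * itself is asynchronous, but the on-stack completion (signalled from
 * mhi_dev_ring_cache_completion_cb() above) is waited on before returning,
 * so callers can treat this as a blocking read.
 */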
void mhi_dev_read_from_host(struct mhi_dev *mhi, struct mhi_addr *transfer)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	struct ring_cache_req ring_req;

	DECLARE_COMPLETION(done);

	ring_req.done = &done;

	if (!mhi) {
		pr_err("invalid MHI ctx\n");
		return;
	}

	if (mhi->config_iatu) {
		offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset;
	} else {
		host_addr_pa = transfer->host_pa | bit_40;
	}

	mhi_log(MHI_MSG_VERBOSE,
		"device 0x%x <<-- host 0x%llx, size %d\n",
		transfer->phy_addr, host_addr_pa,
		(int) transfer->size);
	rc = ipa_dma_async_memcpy((u64)transfer->phy_addr, host_addr_pa,
		(int)transfer->size,
		mhi_dev_ring_cache_completion_cb, &ring_req);
	if (rc)
		pr_err("error while reading from host:%d\n", rc);

	wait_for_completion(&done);
}
EXPORT_SYMBOL(mhi_dev_read_from_host);

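/*
 * Write device memory out to the host. For MHI_DEV_DMA_ASYNC the buffer is
 * DMA-mapped and the caller-supplied event_req carries the completion
 * callback; for MHI_DEV_DMA_SYNC the data is first staged in the local
 * dma_cache bounce buffer and copied with a blocking IPA DMA memcpy.
 */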
void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *transfer,
		struct event_req *ereq, enum mhi_dev_transfer_type tr_type)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	dma_addr_t dma;

	if (!mhi) {
		pr_err("invalid MHI ctx\n");
		return;
	}
	if (mhi->config_iatu) {
		offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset;
	} else {
		host_addr_pa = transfer->host_pa | bit_40;
	}

	mhi_log(MHI_MSG_VERBOSE,
		"device 0x%llx --> host 0x%llx, size %d\n",
		(uint64_t) mhi->cache_dma_handle, host_addr_pa,
		(int) transfer->size);
	if (tr_type == MHI_DEV_DMA_ASYNC) {
		dma = dma_map_single(&mhi->pdev->dev,
			transfer->virt_addr, transfer->size,
			DMA_TO_DEVICE);
		if (ereq->event_type == SEND_EVENT_BUFFER) {
			ereq->dma = dma;
			ereq->dma_len = transfer->size;
		} else if (ereq->event_type == SEND_EVENT_RD_OFFSET) {
			ereq->event_rd_dma = dma;
		}
		rc = ipa_dma_async_memcpy(host_addr_pa, (uint64_t) dma,
			(int)transfer->size,
			ereq->client_cb, ereq);
		if (rc)
			pr_err("error while writing to host:%d\n", rc);
	} else if (tr_type == MHI_DEV_DMA_SYNC) {
		/* Copy the device content to a local device
		 * physical address.
		 */
		memcpy(mhi->dma_cache, transfer->virt_addr,
			transfer->size);
		rc = ipa_dma_sync_memcpy(host_addr_pa,
			(u64) mhi->cache_dma_handle,
			(int) transfer->size);
		if (rc)
			pr_err("error while writing to host:%d\n", rc);
	}
}
EXPORT_SYMBOL(mhi_dev_write_to_host);

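/*
 * Copy a transfer ring element's payload from host memory into a device
 * buffer. IPA_DMA_SYNC bounces through read_handle/read_dma_handle, while
 * IPA_DMA_ASYNC maps the destination directly and completes via
 * mhi_dev_transfer_completion_cb(); snd_cmpl is set when the ring is fully
 * consumed so the completion event batch is flushed to the host.
 */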
int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *mreq)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	struct mhi_dev_ring *ring = NULL;

	if (!mhi || !dev || !host_pa || !mreq) {
		pr_err("%s(): Invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (mhi->config_iatu) {
		offset = (uint64_t)host_pa - mhi->data_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset;
	} else {
		host_addr_pa = host_pa | bit_40;
	}

	mhi_log(MHI_MSG_VERBOSE, "device 0x%llx <-- host 0x%llx, size %d\n",
		(uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len);

	if (mreq->mode == IPA_DMA_SYNC) {
		rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle,
				host_addr_pa, (int) len);
		if (rc) {
			pr_err("error while reading chan using sync:%d\n", rc);
			return rc;
		}
		memcpy(dev, mhi->read_handle, len);
	} else if (mreq->mode == IPA_DMA_ASYNC) {
		ring = mreq->client->channel->ring;
		mreq->dma = dma_map_single(&mhi->pdev->dev, dev, len,
				DMA_FROM_DEVICE);
		mhi_dev_ring_inc_index(ring, ring->rd_offset);

		if (ring->rd_offset == ring->wr_offset)
			mreq->snd_cmpl = 1;
		else
			mreq->snd_cmpl = 0;
		rc = ipa_dma_async_memcpy(mreq->dma, host_addr_pa,
				(int) len, mhi_dev_transfer_completion_cb,
				mreq);
		if (rc) {
			pr_err("error while reading chan using async:%d\n", rc);
			return rc;
		}
	}
	return rc;
}
EXPORT_SYMBOL(mhi_transfer_host_to_device);

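/*
 * Mirror of mhi_transfer_host_to_device() for the device-to-host direction:
 * sync mode stages data in write_handle before a blocking copy, async mode
 * DMA-maps req->buf and defers the completion event to
 * mhi_dev_transfer_completion_cb().
 */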
int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *req)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	struct mhi_dev_ring *ring = NULL;

	if (!mhi || !dev || !req || !host_addr) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (mhi->config_iatu) {
		offset = (uint64_t)host_addr - mhi->data_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset;
	} else {
		host_addr_pa = host_addr | bit_40;
	}
	mhi_log(MHI_MSG_VERBOSE, "device 0x%llx ---> host 0x%llx, size %d\n",
		(uint64_t) mhi->write_dma_handle,
		host_addr_pa, (int) len);

	if (req->mode == IPA_DMA_SYNC) {
		memcpy(mhi->write_handle, dev, len);
		rc = ipa_dma_sync_memcpy(host_addr_pa,
				(u64) mhi->write_dma_handle, (int) len);
	} else if (req->mode == IPA_DMA_ASYNC) {
		req->dma = dma_map_single(&mhi->pdev->dev, req->buf,
				req->len, DMA_TO_DEVICE);
		ring = req->client->channel->ring;
		mhi_dev_ring_inc_index(ring, ring->rd_offset);
		if (ring->rd_offset == ring->wr_offset)
			req->snd_cmpl = 1;
		rc = ipa_dma_async_memcpy(host_addr_pa,
				(uint64_t) req->dma, (int) len,
				mhi_dev_transfer_completion_cb, req);
	}
	return rc;
}
EXPORT_SYMBOL(mhi_transfer_device_to_host);

int mhi_dev_is_list_empty(void)
{
	/* Note: returns 0 when both lists are empty, 1 otherwise */
	if (list_empty(&mhi_ctx->event_ring_list) &&
			list_empty(&mhi_ctx->process_ring_list))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(mhi_dev_is_list_empty);

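/*
 * Derive the event ring doorbell range handed to the HW accelerator. Per
 * the spec note near MHI_HW_ACC_EVT_RING_START, the HW event rings occupy
 * the last two indices (NER - 2 .. NER - 1) unless every channel has its
 * own event ring, in which case the HW channel range is used directly.
 */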
static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi,
				struct ep_pcie_db_config *erdb_cfg)
{
	switch (mhi->cfg.event_rings) {
	case NUM_CHANNELS:
		erdb_cfg->base = HW_CHANNEL_BASE;
		erdb_cfg->end = HW_CHANNEL_END;
		break;
	default:
		erdb_cfg->base = mhi->cfg.event_rings -
					MHI_HW_ACC_EVT_RING_START;
		erdb_cfg->end = mhi->cfg.event_rings -
					MHI_HW_ACC_EVT_RING_END;
		break;
	}
}

int mhi_pcie_config_db_routing(struct mhi_dev *mhi)
{
	int rc = 0;
	struct ep_pcie_db_config chdb_cfg, erdb_cfg;

	if (!mhi) {
		pr_err("Invalid MHI context\n");
		return -EINVAL;
	}

	/* Configure Doorbell routing */
	chdb_cfg.base = HW_CHANNEL_BASE;
	chdb_cfg.end = HW_CHANNEL_END;
	chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb;

	mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);

	mhi_log(MHI_MSG_VERBOSE,
		"Event rings 0x%x => er_base 0x%x, er_end %d\n",
		mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
	erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
	ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg);

	return rc;
}
EXPORT_SYMBOL(mhi_pcie_config_db_routing);

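/*
 * One-time IPA HW-accelerated channel setup: fetch the MSI config from the
 * PCIe EP driver, program doorbell routing so HW channel/event doorbells
 * land in the IPA uC mailboxes, and hand the MMIO and MSI details to
 * ipa_mhi_init(). mhi_hwc_cb() below is invoked by IPA once the uC is ready.
 */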
static int mhi_hwc_init(struct mhi_dev *mhi)
{
	int rc = 0;
	struct ep_pcie_msi_config cfg;
	struct ipa_mhi_init_params ipa_init_params;
	struct ep_pcie_db_config erdb_cfg;

	/* Call IPA HW_ACC Init with MSI Address and db routing info */
	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc) {
		pr_err("Error retrieving pcie msi config\n");
		return rc;
	}

	rc = mhi_pcie_config_db_routing(mhi);
	if (rc) {
		pr_err("Error configuring DB routing\n");
		return rc;
	}

	mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
	mhi_log(MHI_MSG_VERBOSE,
		"Event rings 0x%x => er_base 0x%x, er_end %d\n",
		mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);

	erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
	memset(&ipa_init_params, 0, sizeof(ipa_init_params));
	ipa_init_params.msi.addr_hi = cfg.upper;
	ipa_init_params.msi.addr_low = cfg.lower;
	ipa_init_params.msi.data = cfg.data;
	ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
	ipa_init_params.first_er_idx = erdb_cfg.base;
	ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;

	if (mhi_ctx->config_iatu)
		ipa_init_params.mmio_addr =
			((uint32_t) mhi_ctx->mmio_base_pa_addr) + MHI_REGLEN;
	else
		ipa_init_params.mmio_addr =
			((uint32_t) mhi_ctx->mmio_base_pa_addr);

	if (!mhi_ctx->config_iatu)
		ipa_init_params.assert_bit40 = true;

	mhi_log(MHI_MSG_VERBOSE,
		"MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n",
		ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data);
	ipa_init_params.notify = mhi_hwc_cb;
	ipa_init_params.priv = mhi;

	rc = ipa_mhi_init(&ipa_init_params);
	if (rc) {
		pr_err("Error initializing IPA\n");
		return rc;
	}

	return rc;
}

static int mhi_hwc_start(struct mhi_dev *mhi)
{
	int rc = 0;
	struct ipa_mhi_start_params ipa_start_params;

	memset(&ipa_start_params, 0, sizeof(ipa_start_params));

	if (mhi->config_iatu) {
		ipa_start_params.host_ctrl_addr = mhi->ctrl_base.device_pa;
		ipa_start_params.host_data_addr = mhi->data_base.device_pa;
	} else {
		ipa_start_params.channel_context_array_addr =
				mhi->ch_ctx_shadow.host_pa;
		ipa_start_params.event_context_array_addr =
				mhi->ev_ctx_shadow.host_pa;
	}

	rc = ipa_mhi_start(&ipa_start_params);
	if (rc)
		pr_err("Error starting IPA (rc = 0x%X)\n", rc);

	return rc;
}

static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
	unsigned long data)
{
	int rc = 0;

	switch (event) {
	case IPA_MHI_EVENT_READY:
		mhi_log(MHI_MSG_INFO,
			"HW Channel uC is ready event=0x%X\n", event);
		rc = mhi_hwc_start(mhi_ctx);
		if (rc) {
			pr_err("hwc_init start failed with %d\n", rc);
			return;
		}

		rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx);
		if (rc) {
			pr_err("Failed to enable channel db\n");
			return;
		}

		rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx);
		if (rc) {
			pr_err("Failed to enable control interrupt\n");
			return;
		}

		rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx);
		if (rc) {
			pr_err("Failed to enable command db\n");
			return;
		}

		mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);

		ep_pcie_mask_irq_event(mhi_ctx->phandle,
					EP_PCIE_INT_EVT_MHI_A7, true);
		break;
	case IPA_MHI_EVENT_DATA_AVAILABLE:
		rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
		if (rc) {
			pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc);
			return;
		}
		break;
	default:
		pr_err("HW Channel uC unknown event 0x%X\n", event);
		break;
	}
}

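/*
 * Forward a host channel command to IPA for HW-accelerated channels. Note
 * the even/odd convention visible below: even channel IDs map to
 * IPA_CLIENT_MHI_PROD and odd channel IDs to IPA_CLIENT_MHI_CONS.
 */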
static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid,
				enum mhi_dev_ring_element_type_id type)
{
	int rc = 0;
	struct ipa_mhi_connect_params connect_params;

	memset(&connect_params, 0, sizeof(connect_params));

	switch (type) {
	case MHI_DEV_RING_EL_RESET:
	case MHI_DEV_RING_EL_STOP:
		rc = ipa_mhi_disconnect_pipe(
			mhi->ipa_clnt_hndl[chid - HW_CHANNEL_BASE]);
		if (rc)
			pr_err("Stopping HW Channel%d failed 0x%X\n",
							chid, rc);
		break;
	case MHI_DEV_RING_EL_START:
		connect_params.channel_id = chid;
		connect_params.sys.skip_ep_cfg = true;
		if ((chid % 2) == 0x0)
			connect_params.sys.client = IPA_CLIENT_MHI_PROD;
		else
			connect_params.sys.client = IPA_CLIENT_MHI_CONS;

		rc = ipa_mhi_connect_pipe(&connect_params,
			&mhi->ipa_clnt_hndl[chid - HW_CHANNEL_BASE]);
		if (rc)
			pr_err("HW Channel%d start failed 0x%X\n",
							chid, rc);
		break;
	case MHI_DEV_RING_EL_INVALID:
	default:
		pr_err("Invalid Ring Element type = 0x%X\n", type);
		break;
	}

	return rc;
}

static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev,
							uint32_t *int_value)
{
	int rc = 0;

	rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value);
	if (rc) {
		pr_err("Failed to read A7 status\n");
		return;
	}

	rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value);
	if (rc) {
		pr_err("Failed to clear A7 status\n");
		return;
	}
}

static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id)
{
	struct mhi_addr data_transfer;

	if (mhi->use_ipa) {
		data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		data_transfer.phy_addr = mhi->ch_ctx_cache_dma_handle +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
	}

	data_transfer.size = sizeof(struct mhi_dev_ch_ctx);
	/* Fetch the channel ctx (*dst, *src, size) */
	mhi_dev_read_from_host(mhi, &data_transfer);
}

int mhi_dev_syserr(struct mhi_dev *mhi)
{
	if (!mhi) {
		pr_err("%s: Invalid MHI ctx\n", __func__);
		return -EINVAL;
	}

	mhi_dev_dump_mmio(mhi);
	pr_err("MHI dev sys error\n");

	return 0;
}
EXPORT_SYMBOL(mhi_dev_syserr);

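/*
 * Send a single event ring element to the host: append the element, sync
 * the shadow read pointer into the host's event ring context (the wmb()
 * below orders that write against the MSI), then fire the ring's MSI
 * vector.
 */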
int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
			union mhi_dev_ring_element_type *el)
{
	int rc = 0;
	uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
	struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
	union mhi_dev_ring_ctx *ctx;
	struct ep_pcie_msi_config cfg;
	struct mhi_addr transfer_addr;

	rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
	if (rc) {
		pr_err("Error retrieving pcie msi config\n");
		return rc;
	}

	if (evnt_ring_idx > mhi->cfg.event_rings) {
		pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
		return -EINVAL;
	}

	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
		rc = mhi_ring_start(ring, ctx, mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error starting event ring %d\n", evnt_ring);
			return rc;
		}
	}

	mutex_lock(&mhi->mhi_event_lock);
	/* add the ring element */
	mhi_dev_add_element(ring, el, NULL, 0);

	ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
		sizeof(union mhi_dev_ring_element_type)) +
		ring->ring_ctx->generic.rbase;

	mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
		ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);

	if (mhi->use_ipa)
		transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
			(uint32_t) ring->ring_ctx;
	else
		transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
			(uint32_t) ring->ring_ctx;

	transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
	transfer_addr.size = sizeof(uint64_t);

	mhi_dev_write_to_host(mhi, &transfer_addr, NULL, MHI_DEV_DMA_SYNC);
	/*
	 * rp update in host memory should be flushed
	 * before sending a MSI to the host
	 */
	wmb();

	mutex_unlock(&mhi->mhi_event_lock);
	mhi_log(MHI_MSG_VERBOSE, "event sent:\n");
	mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
	mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x\n", el->evt_tr_comp.len);
	mhi_log(MHI_MSG_VERBOSE, "evnt code :0x%x\n", el->evt_tr_comp.code);
	mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n", el->evt_tr_comp.type);
	mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
	rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
	if (rc) {
		pr_err("%s: error sending msi\n", __func__);
		return rc;
	}
	return rc;
}

/*
 * mhi_dev_event_buf_completion_cb() - Callback invoked by the IPA driver
 * when the transfer completion event buffer copy is done.
 *
 * @req:	event_req structure
 */
static void mhi_dev_event_buf_completion_cb(void *req)
{
	struct event_req *ereq = NULL;

	if (req) {
		ereq = (struct event_req *)req;
	} else {
		pr_err("%s(): event req data is invalid\n", __func__);
		return;
	}
	dma_unmap_single(&mhi_ctx->pdev->dev, ereq->dma,
			ereq->dma_len, DMA_TO_DEVICE);
}

/**
 * mhi_dev_event_rd_offset_completion_cb() - Callback invoked by the IPA
 * driver when the event rd_offset transfer is done.
 *
 * @req:	event_req structure
 */
static void mhi_dev_event_rd_offset_completion_cb(void *req)
{
	union mhi_dev_ring_ctx *ctx;
	int rc = 0;
	struct event_req *ereq = (struct event_req *)req;
	struct mhi_dev_channel *ch = ereq->context;
	struct mhi_dev *mhi = ch->ring->mhi_dev;
	unsigned long flags;

	dma_unmap_single(&mhi_ctx->pdev->dev, ereq->event_rd_dma,
			sizeof(uint64_t), DMA_TO_DEVICE);
	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring];
	rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
	if (rc)
		pr_err("%s: error sending msi\n", __func__);

	/* return the event req to the pre-allocated pool */
	spin_lock_irqsave(&mhi->lock, flags);
	list_add_tail(&ereq->list, &ch->event_req_buffers);
	spin_unlock_irqrestore(&mhi->lock, flags);
}

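/*
 * Batched variant of mhi_dev_send_event() used on the data path. It issues
 * two chained async writes: the event buffer itself (completion:
 * mhi_dev_event_buf_completion_cb) and then the updated rd offset
 * (completion: mhi_dev_event_rd_offset_completion_cb, which triggers the
 * MSI and recycles the event_req).
 */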
static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
		struct event_req *ereq, uint32_t evt_len)
{
	int rc = 0;
	uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
	struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
	union mhi_dev_ring_ctx *ctx;
	struct mhi_addr transfer_addr;
	static int count;

	if (!ereq) {
		pr_err("%s(): invalid event req\n", __func__);
		return -EINVAL;
	}

	/* cache the MSI config on first use */
	if (count == 0) {
		rc = ep_pcie_get_msi_config(mhi->phandle, &mhi->msi_cfg);
		if (rc) {
			pr_err("Error retrieving pcie msi config\n");
			return rc;
		}
		count++;
	}

	if (evnt_ring_idx > mhi->cfg.event_rings) {
		pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
		return -EINVAL;
	}

	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
		rc = mhi_ring_start(ring, ctx, mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error starting event ring %d\n", evnt_ring);
			return rc;
		}
	}

	/* add the ring element */
	ereq->client_cb = mhi_dev_event_buf_completion_cb;
	ereq->event_type = SEND_EVENT_BUFFER;
	rc = mhi_dev_add_element(ring, ereq->tr_events, ereq, evt_len);
	if (rc) {
		pr_err("%s(): error in adding element rc %d\n", __func__, rc);
		return rc;
	}
	ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
		sizeof(union mhi_dev_ring_element_type)) +
		ring->ring_ctx->generic.rbase;

	mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
		ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);

	if (mhi->use_ipa)
		transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp -
			(uint32_t)ring->ring_ctx;
	else
		transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp -
			(uint32_t)ring->ring_ctx;

	transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
	transfer_addr.size = sizeof(uint64_t);
	ereq->event_type = SEND_EVENT_RD_OFFSET;
	ereq->client_cb = mhi_dev_event_rd_offset_completion_cb;
	ereq->event_ring = evnt_ring;
	mhi_dev_write_to_host(mhi, &transfer_addr, ereq, MHI_DEV_DMA_ASYNC);
	return rc;
}

static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
			uint32_t rd_ofst, uint32_t len,
			enum mhi_dev_cmd_completion_code code)
{
	int rc = 0;
	union mhi_dev_ring_element_type compl_event;
	struct mhi_dev *mhi = ch->ring->mhi_dev;

	compl_event.evt_tr_comp.chid = ch->ch_id;
	compl_event.evt_tr_comp.type =
			MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
	compl_event.evt_tr_comp.len = len;
	compl_event.evt_tr_comp.code = code;
	compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
			rd_ofst * sizeof(struct mhi_dev_transfer_ring_element);

	rc = mhi_dev_send_event(mhi,
			mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event);

	return rc;
}

int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
						enum mhi_dev_state state)
{
	union mhi_dev_ring_element_type event;
	int rc = 0;

	event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG;
	event.evt_state_change.mhistate = state;

	rc = mhi_dev_send_event(mhi, 0, &event);
	if (rc) {
		pr_err("Sending state change event failed\n");
		return rc;
	}

	return rc;
}
EXPORT_SYMBOL(mhi_dev_send_state_change_event);

int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env)
{
	union mhi_dev_ring_element_type event;
	int rc = 0;

	event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY;
	event.evt_ee_state.execenv = exec_env;

	rc = mhi_dev_send_event(mhi, 0, &event);
	if (rc) {
		pr_err("Sending EE change event failed\n");
		return rc;
	}

	return rc;
}
EXPORT_SYMBOL(mhi_dev_send_ee_event);

static void mhi_dev_trigger_cb(enum mhi_client_channel ch_id)
{
	struct mhi_dev_ready_cb_info *info;
	enum mhi_ctrl_info state_data;

	list_for_each_entry(info, &mhi_ctx->client_cb_list, list)
		if (info->cb && info->cb_data.channel == ch_id) {
			mhi_ctrl_state_info(info->cb_data.channel, &state_data);
			info->cb_data.ctrl_info = state_data;
			info->cb(&info->cb_data);
		}
}

int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi)
{
	int rc = 0;

	/*
	 * Expected usage: when there is HW ACC traffic, the IPA uC notifies
	 * Q6 -> IPA A7 -> MHI core -> MHI SM
	 */
	rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
	if (rc) {
		pr_err("error sending SM event\n");
		return rc;
	}

	return rc;
}
EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup);

static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi,
				enum mhi_dev_cmd_completion_code code)
{
	int rc = 0;
	union mhi_dev_ring_element_type event;

	if (code > MHI_CMD_COMPL_CODE_RES) {
		mhi_log(MHI_MSG_ERROR,
			"Invalid cmd compl code: %d\n", code);
		return -EINVAL;
	}

	/* send the command completion event to the host */
	event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase
			+ (mhi->ring[MHI_RING_CMD_ID].rd_offset *
			(sizeof(union mhi_dev_ring_element_type)));
	mhi_log(MHI_MSG_VERBOSE, "evt cmd comp ptr :%d\n",
			(uint32_t) event.evt_cmd_comp.ptr);
	event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
	event.evt_cmd_comp.code = code;
	rc = mhi_dev_send_event(mhi, 0, &event);
	if (rc)
		mhi_log(MHI_MSG_ERROR, "Send completion failed\n");

	return rc;
}

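/*
 * Complete a channel stop if the ring is drained: bail out while
 * transactions are pending, otherwise mark the channel stopped, write the
 * new channel context state back to the host, and send the command
 * completion event.
 */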
static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
							struct mhi_dev *mhi)
{
	int rc = 0;
	struct mhi_addr data_transfer;

	if (ring->rd_offset != ring->wr_offset &&
		mhi->ch_ctx_cache[ch_id].ch_type ==
				MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
		mhi_log(MHI_MSG_INFO, "Pending outbound transaction\n");
		return 0;
	} else if (mhi->ch_ctx_cache[ch_id].ch_type ==
			MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
			mhi->ch[ch_id].wr_request_active) {
		mhi_log(MHI_MSG_INFO, "Pending inbound transaction\n");
		return 0;
	}

	/* set the channel to stop */
	mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
	mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;

	if (mhi->use_ipa) {
		data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
	} else {
		data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
	}
	data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
	data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;

	/* update the channel state in the host */
	mhi_dev_write_to_host(mhi, &data_transfer, NULL, MHI_DEV_DMA_SYNC);

	/* send the completion event to the host */
	rc = mhi_dev_send_cmd_comp_event(mhi,
				MHI_CMD_COMPL_CODE_SUCCESS);
	if (rc)
		pr_err("Error sending command completion event\n");

	return rc;
}

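/*
 * Command ring dispatcher: handles START/STOP/RESET for both SW channels
 * (local ring bookkeeping plus a context write-back to the host) and HW
 * channels (delegated to mhi_hwc_chcmd()). State changes on
 * MHI_CLIENT_MBIM_OUT (channel 12, per the uevent strings) are additionally
 * broadcast as uevents for userspace listeners.
 */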
static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi,
			union mhi_dev_ring_element_type *el, void *ctx)
{
	int rc = 0;
	uint32_t ch_id = 0;
	union mhi_dev_ring_element_type event;
	struct mhi_addr host_addr;
	struct mhi_dev_channel *ch;
	struct mhi_dev_ring *ring;
	char *connected[2] = { "MHI_CHANNEL_STATE_12=CONNECTED", NULL};
	char *disconnected[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL};

	ch_id = el->generic.chid;
	mhi_log(MHI_MSG_VERBOSE, "for channel:%d and cmd:%d\n",
		ch_id, el->generic.type);

	switch (el->generic.type) {
	case MHI_DEV_RING_EL_START:
		mhi_log(MHI_MSG_VERBOSE, "received start cmd for channel %d\n",
								ch_id);
		if (ch_id >= (HW_CHANNEL_BASE)) {
			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
			if (rc) {
				mhi_log(MHI_MSG_ERROR,
					"Error with HW channel cmd %d\n", rc);
				rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_UNDEFINED);
				if (rc)
					mhi_log(MHI_MSG_ERROR,
						"Error with compl event\n");
				return;
			}
			goto send_start_completion_event;
		}

		/* fetch the channel context from host */
		mhi_dev_fetch_ch_ctx(mhi, ch_id);

		/* Initialize and configure the corresponding channel ring */
		rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id],
			(union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id],
			mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"start ring failed for ch %d\n", ch_id);
			rc = mhi_dev_send_cmd_comp_event(mhi,
				MHI_CMD_COMPL_CODE_UNDEFINED);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"Error with compl event\n");
			return;
		}

		mhi->ring[mhi->ch_ring_start + ch_id].state =
						RING_STATE_PENDING;

		/* set the channel to running */
		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
		mhi->ch[ch_id].state = MHI_DEV_CH_STARTED;
		mhi->ch[ch_id].ch_id = ch_id;
		mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id];
		mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type;

		/* enable DB for event ring */
		rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id);
		if (rc) {
			pr_err("Failed to enable channel db\n");
			rc = mhi_dev_send_cmd_comp_event(mhi,
				MHI_CMD_COMPL_CODE_UNDEFINED);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"Error with compl event\n");
			return;
		}

		if (mhi->use_ipa)
			host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		else
			host_addr.device_va = mhi->ch_ctx_shadow.device_va +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;

		host_addr.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
		host_addr.size = sizeof(enum mhi_dev_ch_ctx_state);

		mhi_dev_write_to_host(mhi, &host_addr, NULL, MHI_DEV_DMA_SYNC);

send_start_completion_event:
		rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_SUCCESS);
		if (rc)
			pr_err("Error sending command completion event\n");

		mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
		/* Trigger callback to clients */
		mhi_dev_trigger_cb(ch_id);
		if (ch_id == MHI_CLIENT_MBIM_OUT)
			kobject_uevent_env(&mhi_ctx->dev->kobj,
						KOBJ_CHANGE, connected);
		break;
	case MHI_DEV_RING_EL_STOP:
		if (ch_id >= HW_CHANNEL_BASE) {
			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"send channel stop cmd event failed\n");

			/* send the completion event to the host */
			event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
				(mhi->ring[MHI_RING_CMD_ID].rd_offset *
				(sizeof(union mhi_dev_ring_element_type)));
			event.evt_cmd_comp.type =
				MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
			if (rc == 0)
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_SUCCESS;
			else
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_UNDEFINED;

			rc = mhi_dev_send_event(mhi, 0, &event);
			if (rc) {
				pr_err("stop event send failed\n");
				return;
			}
		} else {
			/*
			 * Check if there are any pending transactions for the
			 * ring associated with the channel. If not, stop the
			 * channel and write the disabled channel state to the
			 * host; otherwise the stop completes once the pending
			 * transfer finishes.
			 */
			ring = &mhi->ring[ch_id + mhi->ch_ring_start];
			if (ring->state == RING_STATE_UINT) {
				pr_err("Channel not opened for %d\n", ch_id);
				return;
			}

			ch = &mhi->ch[ch_id];

			mutex_lock(&ch->ch_lock);

			mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP;
			rc = mhi_dev_process_stop_cmd(
				&mhi->ring[mhi->ch_ring_start + ch_id],
				ch_id, mhi);
			if (rc)
				pr_err("stop event send failed\n");

			mutex_unlock(&ch->ch_lock);
			mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
			if (ch_id == MHI_CLIENT_MBIM_OUT)
				kobject_uevent_env(&mhi_ctx->dev->kobj,
						KOBJ_CHANGE, disconnected);
		}
		break;
	case MHI_DEV_RING_EL_RESET:
		mhi_log(MHI_MSG_VERBOSE,
			"received reset cmd for channel %d\n", ch_id);
		if (ch_id >= HW_CHANNEL_BASE) {
			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"send channel reset cmd event failed\n");

			/* send the completion event to the host */
			event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
				(mhi->ring[MHI_RING_CMD_ID].rd_offset *
				(sizeof(union mhi_dev_ring_element_type)));
			event.evt_cmd_comp.type =
				MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
			if (rc == 0)
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_SUCCESS;
			else
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_UNDEFINED;

			rc = mhi_dev_send_event(mhi, 0, &event);
			if (rc) {
				pr_err("reset event send failed\n");
				return;
			}
		} else {
			mhi_log(MHI_MSG_VERBOSE,
				"received reset cmd for channel %d\n",
				ch_id);

			ring = &mhi->ring[ch_id + mhi->ch_ring_start];
			if (ring->state == RING_STATE_UINT) {
				pr_err("Channel not opened for %d\n", ch_id);
				return;
			}

			ch = &mhi->ch[ch_id];

			mutex_lock(&ch->ch_lock);

			/* hard stop and set the channel to stop */
			mhi->ch_ctx_cache[ch_id].ch_state =
						MHI_DEV_CH_STATE_DISABLED;
			mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
			if (mhi->use_ipa)
				host_addr.host_pa =
					mhi->ch_ctx_shadow.host_pa +
					(sizeof(struct mhi_dev_ch_ctx) * ch_id);
			else
				host_addr.device_va =
					mhi->ch_ctx_shadow.device_va +
					(sizeof(struct mhi_dev_ch_ctx) * ch_id);

			host_addr.virt_addr =
					&mhi->ch_ctx_cache[ch_id].ch_state;
			host_addr.size = sizeof(enum mhi_dev_ch_ctx_state);

			/* update the channel state in the host */
			mhi_dev_write_to_host(mhi, &host_addr, NULL,
					MHI_DEV_DMA_SYNC);

			/* send the completion event to the host */
			rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_SUCCESS);
			if (rc)
				pr_err("Error sending command completion event\n");
			mutex_unlock(&ch->ch_lock);
			mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
			if (ch_id == MHI_CLIENT_MBIM_OUT)
				kobject_uevent_env(&mhi_ctx->dev->kobj,
						KOBJ_CHANGE, disconnected);
		}
		break;
	default:
		pr_err("%s: Invalid command:%d\n", __func__, el->generic.type);
		break;
	}
}

static void mhi_dev_process_tre_ring(struct mhi_dev *mhi,
			union mhi_dev_ring_element_type *el, void *ctx)
{
	struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx;
	struct mhi_dev_channel *ch;
	struct mhi_dev_client_cb_reason reason;

	if (ring->id < mhi->ch_ring_start) {
		mhi_log(MHI_MSG_VERBOSE,
			"invalid channel ring id (%d), should be >= %d\n",
			ring->id, mhi->ch_ring_start);
		return;
	}

	ch = &mhi->ch[ring->id - mhi->ch_ring_start];
	reason.ch_id = ch->ch_id;
	reason.reason = MHI_DEV_TRE_AVAILABLE;

	/* Invoke a callback to let the client know its data is ready.
	 * Copy this event to the client's context so that it can be
	 * sent out once the client has fetched the data. Update the rp
	 * before sending the data as part of the event completion.
	 */
	if (ch->active_client && ch->active_client->event_trigger != NULL)
		ch->active_client->event_trigger(&reason);
}

static void mhi_dev_process_ring_pending(struct work_struct *work)
{
	struct mhi_dev *mhi = container_of(work,
				struct mhi_dev, pending_work);
	struct list_head *cp, *q;
	struct mhi_dev_ring *ring;
	struct mhi_dev_channel *ch;
	int rc = 0;

	mutex_lock(&mhi_ctx->mhi_lock);
	rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "error processing command ring\n");
		goto exit;
	}

	list_for_each_safe(cp, q, &mhi->process_ring_list) {
		ring = list_entry(cp, struct mhi_dev_ring, list);
		list_del(cp);
		mhi_log(MHI_MSG_VERBOSE, "processing ring %d\n", ring->id);
		rc = mhi_dev_process_ring(ring);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error processing ring %d\n", ring->id);
			goto exit;
		}

		if (ring->id < mhi->ch_ring_start) {
			mhi_log(MHI_MSG_ERROR,
				"ring (%d) is not a channel ring\n", ring->id);
			goto exit;
		}

		ch = &mhi->ch[ring->id - mhi->ch_ring_start];
		rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error enabling chdb interrupt for %d\n",
				ch->ch_id);
			goto exit;
		}
	}

exit:
	mutex_unlock(&mhi_ctx->mhi_lock);
}

static int mhi_dev_get_event_notify(enum mhi_dev_state state,
						enum mhi_dev_event *event)
{
	int rc = 0;

	switch (state) {
	case MHI_DEV_M0_STATE:
		*event = MHI_DEV_EVENT_M0_STATE;
		break;
	case MHI_DEV_M1_STATE:
		*event = MHI_DEV_EVENT_M1_STATE;
		break;
	case MHI_DEV_M2_STATE:
		*event = MHI_DEV_EVENT_M2_STATE;
		break;
	case MHI_DEV_M3_STATE:
		*event = MHI_DEV_EVENT_M3_STATE;
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

static void mhi_dev_queue_channel_db(struct mhi_dev *mhi,
					uint32_t chintr_value, uint32_t ch_num)
{
	struct mhi_dev_ring *ring;
	int rc = 0;

	for (; chintr_value; ch_num++, chintr_value >>= 1) {
		if (chintr_value & 1) {
			ring = &mhi->ring[ch_num + mhi->ch_ring_start];
			if (ring->state == RING_STATE_UINT) {
				pr_debug("Channel not opened for %d\n", ch_num);
				break;
			}
			mhi_ring_set_state(ring, RING_STATE_PENDING);
			list_add(&ring->list, &mhi->process_ring_list);
			rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num);
			if (rc) {
				pr_err("Error disabling chdb\n");
				return;
			}
			queue_work(mhi->pending_ring_wq, &mhi->pending_work);
		}
	}
}

static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi)
{
	int i, rc = 0;
	uint32_t chintr_value = 0, ch_num = 0;

	rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi);
	if (rc) {
		pr_err("Failed to read channel db status\n");
		return;
	}

	for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
		ch_num = i * MHI_MASK_CH_EV_LEN;
		/* Process channel status whose mask is enabled */
		chintr_value = (mhi->chdb[i].status & mhi->chdb[i].mask);
		if (chintr_value) {
			mhi_log(MHI_MSG_VERBOSE,
				"processing id: %d, ch interrupt 0x%x\n",
				i, chintr_value);
			mhi_dev_queue_channel_db(mhi, chintr_value, ch_num);
			rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i),
							mhi->chdb[i].status);
			if (rc) {
				pr_err("Error writing interrupt clear for A7\n");
				return;
			}
		}
	}
}

static int mhi_dev_abort(struct mhi_dev *mhi)
{
	struct mhi_dev_channel *ch;
	struct mhi_dev_ring *ring;
	int ch_id = 0, rc = 0;
	char *disconnected_12[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL};
	char *disconnected_14[2] = { "MHI_CHANNEL_STATE_14=DISCONNECTED", NULL};

	/* Hard stop all the channels */
	for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
		ring = &mhi->ring[ch_id + mhi->ch_ring_start];
		if (ring->state == RING_STATE_UINT)
			continue;

		ch = &mhi->ch[ch_id];
		mutex_lock(&ch->ch_lock);
		mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
		mutex_unlock(&ch->ch_lock);
	}

	/* Update ctrl node */
	mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_DISCONNECTED);
	mhi_update_state_info(MHI_CLIENT_MBIM_OUT, MHI_STATE_DISCONNECTED);
	mhi_update_state_info(MHI_CLIENT_QMI_OUT, MHI_STATE_DISCONNECTED);
	rc = kobject_uevent_env(&mhi_ctx->dev->kobj,
				KOBJ_CHANGE, disconnected_12);
	if (rc)
		pr_err("Error sending uevent:%d\n", rc);

	rc = kobject_uevent_env(&mhi_ctx->dev->kobj,
				KOBJ_CHANGE, disconnected_14);
	if (rc)
		pr_err("Error sending uevent:%d\n", rc);

	flush_workqueue(mhi->ring_init_wq);
	flush_workqueue(mhi->pending_ring_wq);

	/* Initiate MHI IPA reset */
	ipa_mhi_destroy();

	/* Clean up initialized channels */
	rc = mhi_deinit(mhi);
	if (rc) {
		pr_err("Error during mhi_deinit with %d\n", rc);
		return rc;
	}

	rc = mhi_dev_mmio_mask_chdb_interrupts(mhi_ctx);
	if (rc) {
		pr_err("Failed to mask channel db\n");
		return rc;
	}

	rc = mhi_dev_mmio_disable_ctrl_interrupt(mhi_ctx);
	if (rc) {
		pr_err("Failed to disable control interrupt\n");
		return rc;
	}

	rc = mhi_dev_mmio_disable_cmdb_interrupt(mhi_ctx);
	if (rc) {
		pr_err("Failed to disable command db\n");
		return rc;
	}

	atomic_set(&mhi_ctx->re_init_done, 0);

	mhi_log(MHI_MSG_INFO,
		"Register a PCIe callback during re-init\n");
	mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP;
	mhi_ctx->event_reg.user = mhi_ctx;
	mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
	mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up;
	mhi_ctx->event_reg.options = MHI_REINIT;

	rc = ep_pcie_register_event(mhi_ctx->phandle,
					&mhi_ctx->event_reg);
	if (rc) {
		pr_err("Failed to register for events from PCIe\n");
		return rc;
	}

	/* Set RESET field to 0 */
	mhi_dev_mmio_reset(mhi_ctx);

	return rc;
}

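/*
 * Async DMA completion handler for channel transfers: unmaps the buffer,
 * invokes the client's callback, accumulates a transfer completion event
 * per IEOT element, and flushes the batch via
 * mhi_dev_send_multiple_tr_events() once MAX_TR_EVENTS is reached or
 * snd_cmpl was set. Also finishes a deferred channel stop if one is
 * pending.
 */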
static void mhi_dev_transfer_completion_cb(void *mreq)
{
	struct mhi_dev_channel *ch;
	struct mhi_dev_client *client;
	union mhi_dev_ring_element_type *el;
	int rc = 0;
	struct mhi_req *req = (struct mhi_req *)mreq;
	union mhi_dev_ring_element_type *compl_ev = NULL;
	struct mhi_dev *mhi = NULL;
	unsigned long flags;
	size_t transfer_len;
	u32 snd_cmpl;
	uint32_t rd_offset;

	client = req->client;
	ch = client->channel;
	mhi = ch->ring->mhi_dev;
	el = req->el;
	transfer_len = req->len;
	snd_cmpl = req->snd_cmpl;
	rd_offset = req->rd_offset;
	ch->curr_ereq->context = ch;

	dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
			req->len, DMA_FROM_DEVICE);

	/* Trigger client call back */
	req->client_cb(req);

	if (el->tre.ieot) {
		compl_ev = ch->curr_ereq->tr_events + ch->curr_ereq->num_events;
		compl_ev->evt_tr_comp.chid = ch->ch_id;
		compl_ev->evt_tr_comp.type =
				MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
		compl_ev->evt_tr_comp.len = transfer_len;
		compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
		compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
				rd_offset * TR_RING_ELEMENT_SZ;
		ch->curr_ereq->num_events++;

		if (ch->curr_ereq->num_events >= MAX_TR_EVENTS || snd_cmpl) {
			mhi_log(MHI_MSG_VERBOSE,
					"num of tr events %d for ch %d\n",
					ch->curr_ereq->num_events, ch->ch_id);
			rc = mhi_dev_send_multiple_tr_events(mhi,
				mhi->ch_ctx_cache[ch->ch_id].err_indx,
				ch->curr_ereq, (ch->curr_ereq->num_events *
				sizeof(union mhi_dev_ring_element_type)));
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"failed to send compl evts\n");
			if (!list_empty(&ch->event_req_buffers)) {
				ch->curr_ereq =
					container_of(ch->event_req_buffers.next,
							struct event_req, list);
				spin_lock_irqsave(&mhi->lock, flags);
				list_del_init(&ch->curr_ereq->list);
				spin_unlock_irqrestore(&mhi->lock, flags);
				ch->curr_ereq->num_events = 0;
			} else
				pr_err("%s evt req buffers empty\n", __func__);
		}
	} else
		mhi_log(MHI_MSG_ERROR, "ieot is not valid\n");

	if (ch->state == MHI_DEV_CH_PENDING_STOP) {
		ch->state = MHI_DEV_CH_STOPPED;
		rc = mhi_dev_process_stop_cmd(ch->ring, ch->ch_id, mhi_ctx);
		if (rc)
			mhi_log(MHI_MSG_ERROR,
				"Error while stopping channel (%d)\n",
				ch->ch_id);
	}
}

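/*
 * Bottom half for MHI interrupts (queued from mhi_dev_isr() or the PCIe EP
 * A7 event path): acks control interrupts, handles MHI state changes and
 * device reset, schedules command ring work, and scans channel doorbells
 * before re-enabling the interrupt source.
 */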
static void mhi_dev_scheduler(struct work_struct *work)
{
	struct mhi_dev *mhi = container_of(work,
				struct mhi_dev, chdb_ctrl_work);
	int rc = 0;
	uint32_t int_value = 0;
	struct mhi_dev_ring *ring;
	enum mhi_dev_state state;
	enum mhi_dev_event event = 0;
	bool mhi_reset = false;

	mutex_lock(&mhi_ctx->mhi_lock);
	/* Check for interrupts */
	mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value);

	if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
		mhi_log(MHI_MSG_VERBOSE,
			"processing ctrl interrupt with %d\n", int_value);
		rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
		if (rc) {
			pr_err("%s: get mhi state failed\n", __func__);
			mutex_unlock(&mhi_ctx->mhi_lock);
			return;
		}

		if (mhi_reset) {
			mhi_log(MHI_MSG_VERBOSE,
				"processing mhi device reset\n");
			rc = mhi_dev_abort(mhi);
			if (rc)
				pr_err("device reset failed:%d\n", rc);
			mutex_unlock(&mhi_ctx->mhi_lock);
			queue_work(mhi->ring_init_wq, &mhi->re_init);
			return;
		}

		rc = mhi_dev_get_event_notify(state, &event);
		if (rc) {
			pr_err("unsupported state :%d\n", state);
			goto fail;
		}

		rc = mhi_dev_notify_sm_event(event);
		if (rc) {
			pr_err("error sending SM event\n");
			goto fail;
		}
	}

	if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
		mhi_log(MHI_MSG_VERBOSE,
			"processing cmd db interrupt with %d\n", int_value);
		ring = &mhi->ring[MHI_RING_CMD_ID];
		ring->state = RING_STATE_PENDING;
		queue_work(mhi->pending_ring_wq, &mhi->pending_work);
	}

	/* get the specific channel interrupts */
	mhi_dev_check_channel_interrupt(mhi);

fail:
	mutex_unlock(&mhi_ctx->mhi_lock);

	if (mhi->config_iatu || mhi->mhi_int)
		enable_irq(mhi->mhi_irq);
	else
		ep_pcie_mask_irq_event(mhi->phandle,
				EP_PCIE_INT_EVT_MHI_A7, true);
}

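/*
 * Called when an A7 event is signalled: grab a wakeup source if the
 * device does not hold one already and defer the actual servicing to
 * the scheduler worker.
 */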
void mhi_dev_notify_a7_event(struct mhi_dev *mhi)
{
	if (!atomic_read(&mhi->mhi_dev_wake)) {
		pm_stay_awake(mhi->dev);
		atomic_set(&mhi->mhi_dev_wake, 1);
	}
	mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock\n");

	schedule_work(&mhi->chdb_ctrl_work);
	mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
}
EXPORT_SYMBOL(mhi_dev_notify_a7_event);

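/*
 * MHI interrupt handler: keep the line masked until the scheduler
 * worker has serviced and re-enabled it.
 */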
static irqreturn_t mhi_dev_isr(int irq, void *dev_id)
{
	struct mhi_dev *mhi = dev_id;

	disable_irq_nosync(mhi->mhi_irq);
	schedule_work(&mhi->chdb_ctrl_work);
	mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");

	return IRQ_HANDLED;
}

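/*
 * Map the device-side control and data windows onto the host-side
 * control and data regions through the PCIe outbound iATU.
 */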
int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi)
{
	struct ep_pcie_iatu control, data;
	int rc = 0;
	struct ep_pcie_iatu entries[MHI_HOST_REGION_NUM];

	data.start = mhi->data_base.device_pa;
	data.end = mhi->data_base.device_pa + mhi->data_base.size - 1;
	data.tgt_lower = HOST_ADDR_LSB(mhi->data_base.host_pa);
	data.tgt_upper = HOST_ADDR_MSB(mhi->data_base.host_pa);

	control.start = mhi->ctrl_base.device_pa;
	control.end = mhi->ctrl_base.device_pa + mhi->ctrl_base.size - 1;
	control.tgt_lower = HOST_ADDR_LSB(mhi->ctrl_base.host_pa);
	control.tgt_upper = HOST_ADDR_MSB(mhi->ctrl_base.host_pa);

	entries[0] = data;
	entries[1] = control;

	rc = ep_pcie_config_outbound_iatu(mhi_ctx->phandle, entries,
					MHI_HOST_REGION_NUM);
	if (rc) {
		pr_err("error configuring iATU\n");
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL(mhi_dev_config_outbound_iatu);

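/*
 * Read the host MHI configuration (control/data regions and the channel,
 * event and command context arrays) into local caches and start the
 * command ring. Called once the host has moved the device to M0.
 */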
static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
{
	int rc = 0;
	struct platform_device *pdev;
	uint64_t addr1 = 0;
	struct mhi_addr data_transfer;

	pdev = mhi->pdev;

	/* Get host memory region configuration */
	mhi_dev_get_mhi_addr(mhi);

	mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb,
					mhi->host_addr.ctrl_base_msb);
	mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb,
					mhi->host_addr.data_base_msb);

	addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb,
					mhi->host_addr.ctrl_limit_msb);
	mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa;
	addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb,
					mhi->host_addr.data_limit_msb);
	mhi->data_base.size = addr1 - mhi->data_base.host_pa;

	if (mhi->config_iatu) {
		if (mhi->ctrl_base.host_pa > mhi->data_base.host_pa) {
			mhi->data_base.device_pa = mhi->device_local_pa_base;
			mhi->ctrl_base.device_pa = mhi->device_local_pa_base +
				mhi->ctrl_base.host_pa - mhi->data_base.host_pa;
		} else {
			mhi->ctrl_base.device_pa = mhi->device_local_pa_base;
			mhi->data_base.device_pa = mhi->device_local_pa_base +
				mhi->data_base.host_pa - mhi->ctrl_base.host_pa;
		}

		if (!mhi->use_ipa) {
			mhi->ctrl_base.device_va =
				(uintptr_t) devm_ioremap_nocache(&pdev->dev,
					mhi->ctrl_base.device_pa,
					mhi->ctrl_base.size);
			if (!mhi->ctrl_base.device_va) {
				pr_err("io remap failed for mhi address\n");
				return -EINVAL;
			}
		}
	}

	if (mhi->config_iatu) {
		rc = mhi_dev_config_outbound_iatu(mhi);
		if (rc) {
			pr_err("Configuring iATU failed\n");
			return rc;
		}
	}

	/* Get channel, event and command context base pointers */
	rc = mhi_dev_mmio_get_chc_base(mhi);
	if (rc) {
		pr_err("Fetching channel context failed\n");
		return rc;
	}

	rc = mhi_dev_mmio_get_erc_base(mhi);
	if (rc) {
		pr_err("Fetching event ring context failed\n");
		return rc;
	}

	rc = mhi_dev_mmio_get_crc_base(mhi);
	if (rc) {
		pr_err("Fetching command ring context failed\n");
		return rc;
	}

	rc = mhi_dev_update_ner(mhi);
	if (rc) {
		pr_err("Fetching NER failed\n");
		return rc;
	}

	mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx);
	mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) *
					mhi->cfg.event_rings;
	mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
					mhi->cfg.channels;

	mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
				sizeof(struct mhi_dev_cmd_ctx),
				&mhi->cmd_ctx_cache_dma_handle,
				GFP_KERNEL);
	if (!mhi->cmd_ctx_cache) {
		pr_err("no memory while allocating cmd ctx\n");
		return -ENOMEM;
	}
	memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx));

	mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
				sizeof(struct mhi_dev_ev_ctx) *
				mhi->cfg.event_rings,
				&mhi->ev_ctx_cache_dma_handle,
				GFP_KERNEL);
	if (!mhi->ev_ctx_cache)
		return -ENOMEM;
	memset(mhi->ev_ctx_cache, 0, sizeof(struct mhi_dev_ev_ctx) *
					mhi->cfg.event_rings);

	mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
				sizeof(struct mhi_dev_ch_ctx) *
				mhi->cfg.channels,
				&mhi->ch_ctx_cache_dma_handle,
				GFP_KERNEL);
	if (!mhi->ch_ctx_cache)
		return -ENOMEM;
	memset(mhi->ch_ctx_cache, 0, sizeof(struct mhi_dev_ch_ctx) *
					mhi->cfg.channels);

	if (mhi->use_ipa) {
		data_transfer.phy_addr = mhi->cmd_ctx_cache_dma_handle;
		data_transfer.host_pa = mhi->cmd_ctx_shadow.host_pa;
	}

	data_transfer.size = mhi->cmd_ctx_shadow.size;

	/* Cache the command and event context */
	mhi_dev_read_from_host(mhi, &data_transfer);

	if (mhi->use_ipa) {
		data_transfer.phy_addr = mhi->ev_ctx_cache_dma_handle;
		data_transfer.host_pa = mhi->ev_ctx_shadow.host_pa;
	}

	data_transfer.size = mhi->ev_ctx_shadow.size;

	mhi_dev_read_from_host(mhi, &data_transfer);

	mhi_log(MHI_MSG_VERBOSE,
			"cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
			mhi->cmd_ctx_cache->rbase,
			mhi->cmd_ctx_cache->rp,
			mhi->cmd_ctx_cache->wp);
	mhi_log(MHI_MSG_VERBOSE,
			"ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
			mhi_ctx->ev_ctx_cache->rbase,
			mhi->ev_ctx_cache->rp,
			mhi->ev_ctx_cache->wp);

	rc = mhi_ring_start(&mhi->ring[0],
			(union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
	if (rc) {
		pr_err("error in ring start\n");
		return rc;
	}

	return 0;
}

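/*
 * Move every running channel to the suspended state, mirror the new
 * channel state to the host context and drop the device wakeup source.
 */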
int mhi_dev_suspend(struct mhi_dev *mhi)
{
	int ch_id = 0, rc = 0;
	struct mhi_addr data_transfer;

	mutex_lock(&mhi_ctx->mhi_write_test);
	atomic_set(&mhi->is_suspended, 1);

	for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
		if (mhi->ch_ctx_cache[ch_id].ch_state !=
				MHI_DEV_CH_STATE_RUNNING)
			continue;

		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED;

		if (mhi->use_ipa) {
			data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		} else {
			data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
			data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		}

		data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
		data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;

		/* update the channel state in the host */
		mhi_dev_write_to_host(mhi, &data_transfer, NULL,
				MHI_DEV_DMA_SYNC);
	}

	atomic_set(&mhi->mhi_dev_wake, 0);
	pm_relax(mhi->dev);
	mhi_log(MHI_MSG_VERBOSE, "releasing mhi wakelock\n");

	mutex_unlock(&mhi_ctx->mhi_write_test);

	return rc;
}
EXPORT_SYMBOL(mhi_dev_suspend);

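/*
 * Counterpart of mhi_dev_suspend(): move suspended channels back to the
 * running state and mirror the change to the host context.
 */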
int mhi_dev_resume(struct mhi_dev *mhi)
{
	int ch_id = 0, rc = 0;
	struct mhi_addr data_transfer;

	for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
		if (mhi->ch_ctx_cache[ch_id].ch_state !=
				MHI_DEV_CH_STATE_SUSPENDED)
			continue;

		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
		if (mhi->use_ipa) {
			data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		} else {
			data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
			data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		}

		data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
		data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;

		/* update the channel state in the host */
		mhi_dev_write_to_host(mhi, &data_transfer, NULL,
				MHI_DEV_DMA_SYNC);
	}
	mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);

	atomic_set(&mhi->is_suspended, 0);

	return rc;
}
EXPORT_SYMBOL(mhi_dev_resume);

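/*
 * Ring layout: ring 0 is the command ring, followed by the event rings
 * and then one transfer ring per channel.
 */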
static int mhi_dev_ring_init(struct mhi_dev *dev)
{
	int i = 0;

	mhi_log(MHI_MSG_INFO, "initializing all rings");
	dev->cmd_ring_idx = 0;
	dev->ev_ring_start = 1;
	dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings;

	/* Initialize the command ring */
	mhi_ring_init(&dev->ring[dev->cmd_ring_idx],
			RING_TYPE_CMD, dev->cmd_ring_idx);

	mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx],
			mhi_dev_process_cmd_ring);

	/* Initialize the event rings */
	for (i = dev->ev_ring_start; i < (dev->cfg.event_rings
					+ dev->ev_ring_start); i++)
		mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i);

	/* Initialize the channel rings */
	for (i = dev->ch_ring_start; i < (dev->cfg.channels
					+ dev->ch_ring_start); i++) {
		mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i);
		mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring);
	}

	return 0;
}

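/*
 * Open a channel for a client: allocate the client handle, register the
 * event callback and move the channel to its pending/started state.
 */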
int mhi_dev_open_channel(uint32_t chan_id,
		struct mhi_dev_client **handle_client,
		void (*mhi_dev_client_cb_reason)
		(struct mhi_dev_client_cb_reason *cb))
{
	int rc = 0;
	struct mhi_dev_channel *ch;
	struct platform_device *pdev;

	pdev = mhi_ctx->pdev;
	ch = &mhi_ctx->ch[chan_id];

	mutex_lock(&ch->ch_lock);

	if (ch->active_client) {
		mhi_log(MHI_MSG_ERROR,
			"Channel (%d) already opened by client\n", chan_id);
		rc = -EINVAL;
		goto exit;
	}

	/* Initialize the channel, client and state information */
	*handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL);
	if (!(*handle_client)) {
		dev_err(&pdev->dev, "cannot allocate mhi_dev memory\n");
		rc = -ENOMEM;
		goto exit;
	}

	ch->active_client = (*handle_client);
	(*handle_client)->channel = ch;
	(*handle_client)->event_trigger = mhi_dev_client_cb_reason;

	if (ch->state == MHI_DEV_CH_UNINT) {
		ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
		ch->state = MHI_DEV_CH_PENDING_START;
	} else if (ch->state == MHI_DEV_CH_CLOSED)
		ch->state = MHI_DEV_CH_STARTED;
	else if (ch->state == MHI_DEV_CH_STOPPED)
		ch->state = MHI_DEV_CH_PENDING_START;

exit:
	mutex_unlock(&ch->ch_lock);
	return rc;
}
EXPORT_SYMBOL(mhi_dev_open_channel);

int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
{
	struct mhi_dev_channel *ch;
	int rc;

	ch = handle->channel;

	rc = (ch->ring->rd_offset == ch->ring->wr_offset);

	return rc;
}
EXPORT_SYMBOL(mhi_dev_channel_isempty);

int mhi_dev_close_channel(struct mhi_dev_client *handle)
{
	struct mhi_dev_channel *ch;
	int rc = 0;

	ch = handle->channel;

	mutex_lock(&ch->ch_lock);
	if (ch->state != MHI_DEV_CH_PENDING_START) {
		if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
				!mhi_dev_channel_isempty(handle)) {
			mhi_log(MHI_MSG_ERROR,
				"Trying to close an active channel (%d)\n",
				ch->ch_id);
			rc = -EAGAIN;
			goto exit;
		} else if (ch->tre_loc) {
			mhi_log(MHI_MSG_ERROR,
				"Trying to close channel (%d) when a TRE is active",
				ch->ch_id);
			rc = -EAGAIN;
			goto exit;
		}
	}

	ch->state = MHI_DEV_CH_CLOSED;
	ch->active_client = NULL;
	kfree(handle);
exit:
	mutex_unlock(&ch->ch_lock);
	return rc;
}
EXPORT_SYMBOL(mhi_dev_close_channel);

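/*
 * Called once a full TRE worth of data has been consumed: sends EOB/EOT
 * completions as requested by the TRE flags and reports through the
 * return value whether a TD boundary was crossed.
 */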
static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch,
		struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el,
		uint32_t *chain)
{
	uint32_t td_done = 0;

	/*
	 * A full TRE worth of data was consumed.
	 * Check if we are at a TD boundary.
	 */
	if (ch->tre_bytes_left == 0) {
		if (el->tre.chain) {
			if (el->tre.ieob)
				mhi_dev_send_completion_event(ch,
					ring->rd_offset, el->tre.len,
					MHI_CMD_COMPL_CODE_EOB);
			*chain = 1;
		} else {
			if (el->tre.ieot)
				mhi_dev_send_completion_event(
					ch, ring->rd_offset, el->tre.len,
					MHI_CMD_COMPL_CODE_EOT);
			td_done = 1;
			*chain = 0;
		}
		mhi_dev_ring_inc_index(ring, ring->rd_offset);
		ch->tre_bytes_left = 0;
		ch->tre_loc = 0;
	}

	return td_done;
}

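/*
 * Read data sent by the host into the client buffer, walking the
 * transfer ring one TRE at a time. For async (DMA) requests the loop
 * exits after the first transfer is queued; the remainder is handled
 * from the transfer completion callback.
 */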
int mhi_dev_read_channel(struct mhi_req *mreq)
{
	struct mhi_dev_channel *ch;
	struct mhi_dev_ring *ring;
	union mhi_dev_ring_element_type *el;
	size_t bytes_to_read, addr_offset;
	uint64_t read_from_loc;
	ssize_t bytes_read = 0;
	uintptr_t write_to_loc = 0;
	size_t usr_buf_remaining;
	int td_done = 0, rc = 0;
	struct mhi_dev_client *handle_client;

	if (!mreq) {
		mhi_log(MHI_MSG_ERROR, "invalid mhi request\n");
		return -ENXIO;
	}

	if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
		pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info);
		return -ENODEV;
	}

	if (!mreq->client) {
		mhi_log(MHI_MSG_ERROR, "invalid mhi request client\n");
		return -ENXIO;
	}
	handle_client = mreq->client;
	ch = handle_client->channel;
	usr_buf_remaining = mreq->len;
	ring = ch->ring;
	mreq->chain = 0;

	mutex_lock(&ch->ch_lock);

	do {
		el = &ring->ring_cache[ring->rd_offset];
		mhi_log(MHI_MSG_VERBOSE, "evtptr : 0x%llx\n",
				el->tre.data_buf_ptr);
		mhi_log(MHI_MSG_VERBOSE, "evntlen : 0x%x, offset:%d\n",
				el->tre.len, ring->rd_offset);

		if (ch->tre_loc) {
			bytes_to_read = min(usr_buf_remaining,
					ch->tre_bytes_left);
			mreq->chain = 1;
			mhi_log(MHI_MSG_VERBOSE,
				"remaining buffered data size %d\n",
				(int) ch->tre_bytes_left);
		} else {
			if (ring->rd_offset == ring->wr_offset) {
				mhi_log(MHI_MSG_VERBOSE,
					"nothing to read, returning\n");
				bytes_read = 0;
				goto exit;
			}

			if (ch->state == MHI_DEV_CH_STOPPED) {
				mhi_log(MHI_MSG_VERBOSE,
					"channel (%d) already stopped\n",
					mreq->chan);
				bytes_read = -1;
				goto exit;
			}

			ch->tre_loc = el->tre.data_buf_ptr;
			ch->tre_size = el->tre.len;
			ch->tre_bytes_left = ch->tre_size;

			mhi_log(MHI_MSG_VERBOSE,
				"user_buf_remaining %d, ch->tre_size %d\n",
				usr_buf_remaining, ch->tre_size);
			bytes_to_read = min(usr_buf_remaining, ch->tre_size);
		}

		bytes_read += bytes_to_read;
		addr_offset = ch->tre_size - ch->tre_bytes_left;
		read_from_loc = ch->tre_loc + addr_offset;
		write_to_loc = (uintptr_t) mreq->buf +
				(mreq->len - usr_buf_remaining);
		ch->tre_bytes_left -= bytes_to_read;
		mreq->el = el;
		mreq->actual_len = bytes_read;
		mreq->rd_offset = ring->rd_offset;
		mhi_log(MHI_MSG_VERBOSE, "reading %d bytes from chan %d\n",
				bytes_to_read, mreq->chan);
		rc = mhi_transfer_host_to_device((void *) write_to_loc,
				read_from_loc, bytes_to_read, mhi_ctx, mreq);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"Error while reading chan (%d) rc %d\n",
				mreq->chan, rc);
			mutex_unlock(&ch->ch_lock);
			return rc;
		}
		usr_buf_remaining -= bytes_to_read;

		if (mreq->mode == IPA_DMA_ASYNC) {
			ch->tre_bytes_left = 0;
			ch->tre_loc = 0;
			goto exit;
		} else {
			td_done = mhi_dev_check_tre_bytes_left(ch, ring,
					el, &mreq->chain);
		}
	} while (usr_buf_remaining && !td_done);
	if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) {
		ch->state = MHI_DEV_CH_STOPPED;
		rc = mhi_dev_process_stop_cmd(ring, mreq->chan, mhi_ctx);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"Error while stopping channel (%d)\n",
				mreq->chan);
			bytes_read = -EIO;
		}
	}
exit:
	mutex_unlock(&ch->ch_lock);
	return bytes_read;
}
EXPORT_SYMBOL(mhi_dev_read_channel);

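/*
 * Advance the read pointer past the remaining TREs of the current TD;
 * used when a transfer completed before the TD was fully consumed.
 */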
static void skip_to_next_td(struct mhi_dev_channel *ch)
{
	struct mhi_dev_ring *ring = ch->ring;
	union mhi_dev_ring_element_type *el;
	uint32_t td_boundary_reached = 0;

	ch->skip_td = 1;
	el = &ring->ring_cache[ring->rd_offset];
	while (ring->rd_offset != ring->wr_offset) {
		if (td_boundary_reached) {
			ch->skip_td = 0;
			break;
		}
		if (!el->tre.chain)
			td_boundary_reached = 1;
		mhi_dev_ring_inc_index(ring, ring->rd_offset);
		el = &ring->ring_cache[ring->rd_offset];
	}
}

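/*
 * Write client data towards the host on an inbound channel: wake the
 * core up if it is suspended, then copy at most one TRE worth of data
 * per iteration and generate the matching completion events.
 */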
int mhi_dev_write_channel(struct mhi_req *wreq)
{
	struct mhi_dev_channel *ch;
	struct mhi_dev_ring *ring;
	struct mhi_dev_client *handle_client;
	union mhi_dev_ring_element_type *el;
	enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
	int rc = 0;
	uint64_t skip_tres = 0, write_to_loc;
	uintptr_t read_from_loc;
	size_t usr_buf_remaining;
	size_t usr_buf_offset = 0;
	size_t bytes_to_write = 0;
	size_t bytes_written = 0;
	uint32_t tre_len = 0, suspend_wait_timeout = 0;

	if (!wreq || !wreq->client || !wreq->buf) {
		pr_err("%s: invalid parameters\n", __func__);
		return -ENXIO;
	}

	if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
		pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info);
		return -ENODEV;
	}

	usr_buf_remaining = wreq->len;
	mutex_lock(&mhi_ctx->mhi_write_test);

	if (atomic_read(&mhi_ctx->is_suspended)) {
		/*
		 * Expected usage is when there is a write
		 * to the MHI core -> notify SM.
		 */
		rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP);
		if (rc) {
			pr_err("error sending core wakeup event\n");
			mutex_unlock(&mhi_ctx->mhi_write_test);
			return rc;
		}
	}

	while (atomic_read(&mhi_ctx->is_suspended) &&
			suspend_wait_timeout < MHI_WAKEUP_TIMEOUT_CNT) {
		/* wait for the suspend to finish */
		msleep(MHI_SUSPEND_MIN);
		suspend_wait_timeout++;
	}

	if (suspend_wait_timeout >= MHI_WAKEUP_TIMEOUT_CNT ||
			mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
		pr_err("Failed to wake up core\n");
		mutex_unlock(&mhi_ctx->mhi_write_test);
		return -ENODEV;
	}

	handle_client = wreq->client;
	ch = handle_client->channel;
	ch->wr_request_active = true;

	ring = ch->ring;

	mutex_lock(&ch->ch_lock);

	if (ch->state == MHI_DEV_CH_STOPPED) {
		mhi_log(MHI_MSG_ERROR,
			"channel %d already stopped\n", wreq->chan);
		bytes_written = -1;
		goto exit;
	}

	if (ch->state == MHI_DEV_CH_PENDING_STOP) {
		if (mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx) < 0)
			bytes_written = -1;
		goto exit;
	}

	if (ch->skip_td)
		skip_to_next_td(ch);

	do {
		if (ring->rd_offset == ring->wr_offset) {
			mhi_log(MHI_MSG_ERROR,
				"%s():rd & wr offsets are equal\n",
				__func__);
			mhi_log(MHI_MSG_INFO, "No TREs available\n");
			break;
		}

		el = &ring->ring_cache[ring->rd_offset];
		tre_len = el->tre.len;
		if (wreq->len > tre_len) {
			pr_err("%s(): rlen = %d, tlen = %d: client buf > tre len\n",
					__func__, wreq->len, tre_len);
			bytes_written = -ENOMEM;
			goto exit;
		}

		bytes_to_write = min(usr_buf_remaining, tre_len);
		usr_buf_offset = wreq->len - bytes_to_write;
		read_from_loc = (uintptr_t) wreq->buf + usr_buf_offset;
		write_to_loc = el->tre.data_buf_ptr;
		wreq->rd_offset = ring->rd_offset;
		wreq->el = el;
		rc = mhi_transfer_device_to_host(write_to_loc,
				(void *) read_from_loc,
				bytes_to_write,
				mhi_ctx, wreq);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"Error while writing chan (%d) rc %d\n",
				wreq->chan, rc);
			goto exit;
		}
		bytes_written += bytes_to_write;
		usr_buf_remaining -= bytes_to_write;

		if (usr_buf_remaining) {
			if (!el->tre.chain)
				code = MHI_CMD_COMPL_CODE_OVERFLOW;
			else if (el->tre.ieob)
				code = MHI_CMD_COMPL_CODE_EOB;
		} else {
			if (el->tre.chain)
				skip_tres = 1;
			code = MHI_CMD_COMPL_CODE_EOT;
		}
		if (wreq->mode == IPA_DMA_SYNC) {
			rc = mhi_dev_send_completion_event(ch,
					ring->rd_offset, bytes_to_write, code);
			if (rc)
				mhi_log(MHI_MSG_VERBOSE,
					"error sending cmpl evt ch:%d\n",
					wreq->chan);
			mhi_dev_ring_inc_index(ring, ring->rd_offset);
		}

		if (ch->state == MHI_DEV_CH_PENDING_STOP)
			break;

	} while (!skip_tres && usr_buf_remaining);

	if (skip_tres)
		skip_to_next_td(ch);

	if (ch->state == MHI_DEV_CH_PENDING_STOP) {
		rc = mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"channel %d stop failed\n", wreq->chan);
		}
	}
exit:
	ch->wr_request_active = false;
	mutex_unlock(&ch->ch_lock);
	mutex_unlock(&mhi_ctx->mhi_write_test);
	return bytes_written;
}
EXPORT_SYMBOL(mhi_dev_write_channel);

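/*
 * Bring-up worker scheduled once IPA is ready: initializes the rings,
 * signals the host via the BHI MSI if one is configured, waits for the
 * host to set M0, caches the host context and initializes the hardware
 * accelerated channels.
 */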
static void mhi_dev_enable(struct work_struct *work)
{
	int rc = 0;
	struct ep_pcie_msi_config msi_cfg;
	struct mhi_dev *mhi = container_of(work,
				struct mhi_dev, ring_init_cb_work);
	bool mhi_reset;
	enum mhi_dev_state state;
	uint32_t max_cnt = 0, bhi_intvec = 0;

	if (mhi->use_ipa) {
		rc = ipa_dma_init();
		if (rc) {
			pr_err("ipa dma init failed\n");
			return;
		}

		rc = ipa_dma_enable();
		if (rc) {
			pr_err("ipa enable failed\n");
			return;
		}
	}

	rc = mhi_dev_ring_init(mhi);
	if (rc) {
		pr_err("MHI dev ring init failed\n");
		return;
	}

	/* Enable the MHI dev network stack interface */
	rc = mhi_dev_net_interface_init();
	if (rc)
		pr_err("%s Failed to initialize mhi_dev_net iface\n", __func__);

	rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
	if (rc)
		return;

	if (bhi_intvec != 0xffffffff) {
		/* Indicate to the host that the device is ready */
		rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg);
		if (!rc) {
			rc = ep_pcie_trigger_msi(mhi_ctx->phandle, bhi_intvec);
			if (rc) {
				pr_err("%s: error sending msi\n", __func__);
				return;
			}
		} else {
			pr_err("MHI: error getting MSI configs\n");
		}
	}

	rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
	if (rc) {
		pr_err("%s: get mhi state failed\n", __func__);
		return;
	}

	while (state != MHI_DEV_M0_STATE && max_cnt < MHI_SUSPEND_TIMEOUT) {
		/* Wait for the host to set the M0 state */
		msleep(MHI_SUSPEND_MIN);
		rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
		if (rc) {
			pr_err("%s: get mhi state failed\n", __func__);
			return;
		}
		max_cnt++;
	}

	mhi_log(MHI_MSG_INFO, "state:%d\n", state);

	if (state == MHI_DEV_M0_STATE) {
		rc = mhi_dev_cache_host_cfg(mhi);
		if (rc) {
			pr_err("Failed to cache the host config\n");
			return;
		}

		rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
		if (rc) {
			pr_err("%s: env setting failed\n", __func__);
			return;
		}
	} else {
		pr_err("MHI device failed to enter M0\n");
		return;
	}

	rc = mhi_hwc_init(mhi_ctx);
	if (rc) {
		pr_err("error during hwc_init\n");
		return;
	}

	if (mhi_ctx->config_iatu || mhi_ctx->mhi_int)
		enable_irq(mhi_ctx->mhi_irq);

	mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED);
}

static void mhi_ring_init_cb(void *data)
{
	struct mhi_dev *mhi = data;

	if (!mhi) {
		pr_err("Invalid MHI ctx\n");
		return;
	}

	queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work);
}

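/*
 * Register a client callback for channel state changes. The callback
 * info is kept on client_cb_list and invoked on future state updates.
 */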
int mhi_register_state_cb(void (*mhi_state_cb)
			(struct mhi_dev_client_cb_data *cb_data),
			void *data, enum mhi_client_channel channel)
{
	struct mhi_dev_ready_cb_info *cb_info = NULL;

	if (!mhi_ctx) {
		pr_err("MHI device not ready\n");
		return -ENXIO;
	}

	if (channel >= MHI_MAX_CHANNELS) {
		pr_err("Invalid channel :%d\n", channel);
		return -EINVAL;
	}

	mutex_lock(&mhi_ctx->mhi_lock);
	cb_info = kmalloc(sizeof(struct mhi_dev_ready_cb_info), GFP_KERNEL);
	if (!cb_info) {
		mutex_unlock(&mhi_ctx->mhi_lock);
		return -ENOMEM;
	}

	cb_info->cb = mhi_state_cb;
	cb_info->cb_data.user_data = data;
	cb_info->cb_data.channel = channel;

	list_add_tail(&cb_info->list, &mhi_ctx->client_cb_list);

	/*
	 * If the channel is already open when the client registers, no
	 * callback is issued; return -EEXIST to notify the client instead.
	 * The request is still added to the list so the client receives
	 * future state change notifications. The channel struct may not be
	 * allocated yet if this function is called early during boot - add
	 * an explicit check for a non-null "ch".
	 */
	if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
		mutex_unlock(&mhi_ctx->mhi_lock);
		return -EEXIST;
	}

	mutex_unlock(&mhi_ctx->mhi_lock);

	return 0;
}
EXPORT_SYMBOL(mhi_register_state_cb);

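/*
 * Record the new state for the given uevent index and, for the QMI
 * channels, poke the legacy UCI control handler.
 */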
static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info)
{
	struct mhi_dev_client_cb_reason reason;

	if (uevent_idx == MHI_DEV_UEVENT_CTRL)
		mhi_ctx->ctrl_info = info;

	channel_state_info[uevent_idx].ctrl_info = info;

	if (uevent_idx == MHI_CLIENT_QMI_OUT ||
			uevent_idx == MHI_CLIENT_QMI_IN) {
		/* For legacy reasons for QTI client */
		reason.reason = MHI_DEV_CTRL_UPDATE;
		uci_ctrl_update(&reason);
	}
}

int mhi_ctrl_state_info(uint32_t idx, uint32_t *info)
{
	if (idx == MHI_DEV_UEVENT_CTRL)
		*info = mhi_ctx->ctrl_info;
	else
		*info = channel_state_info[idx].ctrl_info;

	mhi_log(MHI_MSG_VERBOSE, "idx:%d, ctrl:%d", idx, *info);

	return 0;
}
EXPORT_SYMBOL(mhi_ctrl_state_info);

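/* Parse the MMIO, mailbox and MHI properties from the device tree node */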
static int get_device_tree_data(struct platform_device *pdev)
{
	struct mhi_dev *mhi;
	int rc = 0;
	struct resource *res_mem = NULL;

	mhi = devm_kzalloc(&pdev->dev,
			sizeof(struct mhi_dev), GFP_KERNEL);
	if (!mhi)
		return -ENOMEM;

	mhi->pdev = pdev;
	mhi->dev = &pdev->dev;
	res_mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "mhi_mmio_base");
	if (!res_mem) {
		rc = -EINVAL;
		pr_err("Request MHI MMIO physical memory region failed\n");
		return rc;
	}

	mhi->mmio_base_pa_addr = res_mem->start;
	mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
	if (!mhi->mmio_base_addr) {
		pr_err("Failed to IO map MMIO registers.\n");
		rc = -EINVAL;
		return rc;
	}

	res_mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "ipa_uc_mbox_crdb");
	if (!res_mem) {
		rc = -EINVAL;
		pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
		return rc;
	}

	mhi->ipa_uc_mbox_crdb = res_mem->start;

	res_mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "ipa_uc_mbox_erdb");
	if (!res_mem) {
		rc = -EINVAL;
		pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
		return rc;
	}

	mhi->ipa_uc_mbox_erdb = res_mem->start;
	mhi_ctx = mhi;

	rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-ifc-id",
				&mhi_ctx->ifc_id);
	if (rc) {
		pr_err("qcom,mhi-ifc-id does not exist.\n");
		return rc;
	}

	rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-ep-msi",
				&mhi_ctx->mhi_ep_msi_num);
	if (rc) {
		pr_err("qcom,mhi-ep-msi does not exist.\n");
		return rc;
	}

	rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-version",
				&mhi_ctx->mhi_version);
	if (rc) {
		pr_err("qcom,mhi-version does not exist.\n");
		return rc;
	}

	mhi_ctx->use_ipa = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-ipa-software-channel");

	mhi_ctx->config_iatu = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,mhi-config-iatu");

	if (mhi_ctx->config_iatu) {
		rc = of_property_read_u32((&pdev->dev)->of_node,
					"qcom,mhi-local-pa-base",
					&mhi_ctx->device_local_pa_base);
		if (rc) {
			pr_err("qcom,mhi-local-pa-base does not exist\n");
			return rc;
		}
	}

	mhi_ctx->mhi_int = of_property_read_bool((&pdev->dev)->of_node,
					"qcom,mhi-interrupt");

	if (mhi->config_iatu || mhi_ctx->mhi_int) {
		mhi->mhi_irq = platform_get_irq_byname(pdev, "mhi-device-inta");
		if (mhi->mhi_irq < 0) {
			pr_err("Invalid MHI device interrupt\n");
			rc = mhi->mhi_irq;
			return rc;
		}
	}

	device_init_wakeup(mhi->dev, true);
	/* MHI device will be woken up from a PCIe event */
	device_set_wakeup_capable(mhi->dev, false);
	/* Hold a wakelock until completion of M0 */
	pm_stay_awake(mhi->dev);
	atomic_set(&mhi->mhi_dev_wake, 1);

	mhi_log(MHI_MSG_VERBOSE, "acquiring wakelock\n");

	return 0;
}

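/* Undo mhi_init(): free ring caches, channel locks and the MMIO backup */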
static int mhi_deinit(struct mhi_dev *mhi)
{
	int rc = 0, i = 0, ring_id = 0;
	struct mhi_dev_ring *ring;
	struct platform_device *pdev = mhi->pdev;

	ring_id = mhi->cfg.channels + mhi->cfg.event_rings + 1;

	for (i = 0; i < ring_id; i++) {
		ring = &mhi->ring[i];
		if (ring->state == RING_STATE_UINT)
			continue;

		dma_free_coherent(mhi->dev, ring->ring_size *
				sizeof(union mhi_dev_ring_element_type),
				ring->ring_cache,
				ring->ring_cache_dma_handle);
	}

	for (i = 0; i < mhi->cfg.channels; i++)
		mutex_destroy(&mhi->ch[i].ch_lock);

	devm_kfree(&pdev->dev, mhi->mmio_backup);
	devm_kfree(&pdev->dev, mhi->ch);
	devm_kfree(&pdev->dev, mhi->ring);

	mhi_dev_sm_exit(mhi);

	mhi->mmio_initialized = false;

	return rc;
}

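/*
 * Allocate rings, channels and the MMIO backup area; the software data
 * channels additionally get a pool of pre-allocated event requests for
 * batched completion events.
 */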
static int mhi_init(struct mhi_dev *mhi)
{
	int rc = 0, i = 0;
	struct platform_device *pdev = mhi->pdev;

	rc = mhi_dev_mmio_init(mhi);
	if (rc) {
		pr_err("Failed to update the MMIO init\n");
		return rc;
	}

	mhi->ring = devm_kzalloc(&pdev->dev,
			(sizeof(struct mhi_dev_ring) *
			(mhi->cfg.channels + mhi->cfg.event_rings + 1)),
			GFP_KERNEL);
	if (!mhi->ring)
		return -ENOMEM;

	mhi->ch = devm_kzalloc(&pdev->dev,
			(sizeof(struct mhi_dev_channel) *
			(mhi->cfg.channels)), GFP_KERNEL);
	if (!mhi->ch)
		return -ENOMEM;

	for (i = 0; i < mhi->cfg.channels; i++) {
		mutex_init(&mhi->ch[i].ch_lock);
		if (i == MHI_CLIENT_IP_SW_4_OUT || i == MHI_CLIENT_IP_SW_4_IN) {
			int nreq = 0;

			INIT_LIST_HEAD(&mhi->ch[i].event_req_buffers);
			while (nreq < MHI_MAX_EVT_REQ) {
				struct event_req *ereq;

				/* Pre-allocate event requests */
				ereq = kzalloc(sizeof(struct event_req),
						GFP_KERNEL);
				if (!ereq)
					return -ENOMEM;

				/*
				 * Pre-allocate buffers to queue
				 * transfer completion events
				 */
				ereq->tr_events = kzalloc(RING_ELEMENT_TYPE_SZ *
						MAX_TR_EVENTS, GFP_KERNEL);
				if (!ereq->tr_events) {
					kfree(ereq);
					return -ENOMEM;
				}
				list_add_tail(&ereq->list,
						&mhi->ch[i].event_req_buffers);
				nreq++;
			}
			mhi->ch[i].curr_ereq =
				container_of(mhi->ch[i].event_req_buffers.next,
						struct event_req, list);
			list_del_init(&mhi->ch[i].curr_ereq->list);
		}
	}

	spin_lock_init(&mhi->lock);
	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
			MHI_DEV_MMIO_RANGE, GFP_KERNEL);
	if (!mhi->mmio_backup)
		return -ENOMEM;

	return 0;
}

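/*
 * Re-initialize MHI after a device reset once the PCIe link is back
 * with BME set; guarded by re_init_done so it runs only once per reset.
 */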
static int mhi_dev_resume_mmio_mhi_reinit(struct mhi_dev *mhi_ctx)
{
	int rc = 0;

	mutex_lock(&mhi_ctx->mhi_lock);
	if (atomic_read(&mhi_ctx->re_init_done)) {
		mhi_log(MHI_MSG_INFO, "Re_init done, return\n");
		mutex_unlock(&mhi_ctx->mhi_lock);
		return 0;
	}

	rc = mhi_init(mhi_ctx);
	if (rc) {
		pr_err("Error initializing MHI MMIO with %d\n", rc);
		goto fail;
	}

	mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
		EP_PCIE_EVENT_PM_D3_COLD |
		EP_PCIE_EVENT_PM_D0 |
		EP_PCIE_EVENT_PM_RST_DEAST |
		EP_PCIE_EVENT_MHI_A7 |
		EP_PCIE_EVENT_LINKDOWN;
	mhi_ctx->event_reg.user = mhi_ctx;
	mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
	mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;

	rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
	if (rc) {
		pr_err("Failed to register for events from PCIe\n");
		goto fail;
	}

	rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
	if (rc < 0) {
		if (rc == -EEXIST) {
			mhi_ring_init_cb(mhi_ctx);
		} else {
			pr_err("Error calling IPA cb with %d\n", rc);
			goto fail;
		}
	}

	/* Invoke MHI SM when device is in RESET state */
	rc = mhi_dev_sm_init(mhi_ctx);
	if (rc) {
		pr_err("%s: Error during SM init\n", __func__);
		goto fail;
	}

	/* set the env before setting the ready bit */
	rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE);
	if (rc) {
		pr_err("%s: env setting failed\n", __func__);
		goto fail;
	}

	/* All set, notify the host */
	rc = mhi_dev_sm_set_ready();
	if (rc) {
		pr_err("%s: unable to set ready bit\n", __func__);
		goto fail;
	}

	atomic_set(&mhi_ctx->is_suspended, 0);
fail:
	atomic_set(&mhi_ctx->re_init_done, 1);
	mutex_unlock(&mhi_ctx->mhi_lock);
	return rc;
}

static void mhi_dev_reinit(struct work_struct *work)
{
	struct mhi_dev *mhi_ctx = container_of(work,
				struct mhi_dev, re_init);
	enum ep_pcie_link_status link_state;
	int rc = 0;

	link_state = ep_pcie_get_linkstatus(mhi_ctx->phandle);
	if (link_state == EP_PCIE_LINK_ENABLED) {
		/* PCIe link is up with BME set */
		rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx);
		if (rc) {
			pr_err("Failed to re-initialize MHI MMIO\n");
			return;
		}
	}

	mhi_log(MHI_MSG_VERBOSE, "Wait for PCIe linkup\n");
}

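/*
 * First-time MHI bring-up once the PCIe link is available: set up
 * workqueues and DMA buffers, register for PCIe and IPA events, start
 * the state machine and advertise readiness to the host.
 */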
static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx)
{
	struct platform_device *pdev;
	int rc = 0;

	pdev = mhi_ctx->pdev;

	INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);

	mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq",
							WQ_HIGHPRI, 0);
	if (!mhi_ctx->pending_ring_wq) {
		rc = -ENOMEM;
		return rc;
	}

	INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending);

	INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable);

	INIT_WORK(&mhi_ctx->re_init, mhi_dev_reinit);

	mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq",
							WQ_HIGHPRI, 0);
	if (!mhi_ctx->ring_init_wq) {
		rc = -ENOMEM;
		return rc;
	}

	INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
	INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
	mutex_init(&mhi_ctx->mhi_event_lock);
	mutex_init(&mhi_ctx->mhi_write_test);

	rc = mhi_init(mhi_ctx);
	if (rc)
		return rc;

	mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev,
			(TRB_MAX_DATA_SIZE * 4),
			&mhi_ctx->cache_dma_handle, GFP_KERNEL);
	if (!mhi_ctx->dma_cache)
		return -ENOMEM;

	mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev,
			(TRB_MAX_DATA_SIZE * 4),
			&mhi_ctx->read_dma_handle,
			GFP_KERNEL);
	if (!mhi_ctx->read_handle)
		return -ENOMEM;

	mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev,
			(TRB_MAX_DATA_SIZE * 24),
			&mhi_ctx->write_dma_handle,
			GFP_KERNEL);
	if (!mhi_ctx->write_handle)
		return -ENOMEM;

	rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version);
	if (rc) {
		pr_err("Failed to update the MHI version\n");
		return rc;
	}

	mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
	if (!mhi_ctx->phandle) {
		pr_err("PCIe driver get handle failed.\n");
		return -EINVAL;
	}

	mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
		EP_PCIE_EVENT_PM_D3_COLD |
		EP_PCIE_EVENT_PM_D0 |
		EP_PCIE_EVENT_PM_RST_DEAST |
		EP_PCIE_EVENT_MHI_A7 |
		EP_PCIE_EVENT_LINKDOWN;
	mhi_ctx->event_reg.user = mhi_ctx;
	mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
	mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;

	rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
	if (rc) {
		pr_err("Failed to register for events from PCIe\n");
		return rc;
	}

	pr_info("Registering with IPA\n");

	rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
	if (rc < 0) {
		if (rc == -EEXIST) {
			mhi_ring_init_cb(mhi_ctx);
		} else {
			pr_err("Error calling IPA cb with %d\n", rc);
			return rc;
		}
	}

	/* Invoke MHI SM when device is in RESET state */
	rc = mhi_dev_sm_init(mhi_ctx);
	if (rc) {
		pr_err("%s: Error during SM init\n", __func__);
		return rc;
	}

	/* set the env before setting the ready bit */
	rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE);
	if (rc) {
		pr_err("%s: env setting failed\n", __func__);
		return rc;
	}

	/* All set, notify the host */
	mhi_dev_sm_set_ready();

	if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
		rc = devm_request_irq(&pdev->dev, mhi_ctx->mhi_irq, mhi_dev_isr,
				IRQF_TRIGGER_HIGH, "mhi_isr", mhi_ctx);
		if (rc) {
			dev_err(&pdev->dev, "request mhi irq failed %d\n", rc);
			return -EINVAL;
		}

		disable_irq(mhi_ctx->mhi_irq);
	}

	return 0;
}

static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify)
{
	if (!notify || !notify->user) {
		pr_err("Null argument for notify\n");
		return;
	}

	mhi_ctx = notify->user;
	mhi_dev_pcie_notify_event = notify->options;
	mhi_log(MHI_MSG_INFO,
			"PCIe event=0x%x\n", notify->options);
	queue_work(mhi_ctx->pcie_event_wq, &mhi_ctx->pcie_event);
}

static void mhi_dev_pcie_handle_event(struct work_struct *work)
{
	struct mhi_dev *mhi_ctx = container_of(work, struct mhi_dev,
								pcie_event);
	int rc = 0;

	if (mhi_dev_pcie_notify_event == MHI_INIT) {
		rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx);
		if (rc) {
			pr_err("Error during MHI device initialization\n");
			return;
		}
	} else if (mhi_dev_pcie_notify_event == MHI_REINIT) {
		rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx);
		if (rc) {
			pr_err("Error during MHI device re-initialization\n");
			return;
		}
	}
}

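/*
 * Probe: parse the DT, set up IPC logging and UCI, then initialize MHI
 * right away if the PCIe link is already up, or register for a link-up
 * callback otherwise.
 */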
static int mhi_dev_probe(struct platform_device *pdev)
{
	int rc = 0;

	if (pdev->dev.of_node) {
		rc = get_device_tree_data(pdev);
		if (rc) {
			pr_err("Error reading MHI Dev DT\n");
			return rc;
		}
		mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
							"mhi", 0);
		if (mhi_ipc_log == NULL) {
			dev_err(&pdev->dev,
				"Failed to create IPC logging context\n");
		}
		/*
		 * The below list and mutex should be initialized
		 * before calling mhi_uci_init to avoid a crash in
		 * mhi_register_state_cb when accessing them.
		 */
		INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
		mutex_init(&mhi_ctx->mhi_lock);

		mhi_uci_init();
		mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
						MHI_STATE_CONFIGURED);
	}

	INIT_WORK(&mhi_ctx->pcie_event, mhi_dev_pcie_handle_event);
	mhi_ctx->pcie_event_wq = alloc_workqueue("mhi_dev_pcie_event_wq",
							WQ_HIGHPRI, 0);
	if (!mhi_ctx->pcie_event_wq) {
		pr_err("no memory\n");
		rc = -ENOMEM;
		return rc;
	}

	mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
	if (mhi_ctx->phandle) {
		/* PCIe link is already up */
		rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx);
		if (rc) {
			pr_err("Error during MHI device initialization\n");
			return rc;
		}
	} else {
		pr_debug("Register a PCIe callback\n");
		mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP;
		mhi_ctx->event_reg.user = mhi_ctx;
		mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
		mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up;
		mhi_ctx->event_reg.options = MHI_INIT;

		rc = ep_pcie_register_event(mhi_ctx->phandle,
						&mhi_ctx->event_reg);
		if (rc) {
			pr_err("Failed to register for events from PCIe\n");
			return rc;
		}
	}

	return 0;
}

static int mhi_dev_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id mhi_dev_match_table[] = {
	{ .compatible = "qcom,msm-mhi-dev" },
	{}
};

static struct platform_driver mhi_dev_driver = {
	.driver = {
		.name = "qcom,msm-mhi-dev",
		.of_match_table = mhi_dev_match_table,
	},
	.probe = mhi_dev_probe,
	.remove = mhi_dev_remove,
};

module_param(mhi_msg_lvl, uint, 0644);
module_param(mhi_ipc_msg_lvl, uint, 0644);

MODULE_PARM_DESC(mhi_msg_lvl, "mhi msg lvl");
MODULE_PARM_DESC(mhi_ipc_msg_lvl, "mhi ipc msg lvl");

static int __init mhi_dev_init(void)
{
	return platform_driver_register(&mhi_dev_driver);
}
module_init(mhi_dev_init);

static void __exit mhi_dev_exit(void)
{
	platform_driver_unregister(&mhi_dev_driver);
}
module_exit(mhi_dev_exit);

MODULE_DESCRIPTION("MHI device driver");
MODULE_LICENSE("GPL v2");