/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/msm_ep_pcie.h>
#include <linux/ipa_mhi.h>
#include <linux/vmalloc.h>

#include "mhi.h"
#include "mhi_hwio.h"
#include "mhi_sm.h"

/* Wait time on the device for the host to set M0 state */
#define MHI_DEV_M0_MAX_CNT		30
/* Wait time before suspend/resume is complete */
#define MHI_SUSPEND_MIN			100
#define MHI_SUSPEND_TIMEOUT		600
#define MHI_WAKEUP_TIMEOUT_CNT		20
#define MHI_MASK_CH_EV_LEN		32
#define MHI_RING_CMD_ID			0
#define MHI_RING_PRIMARY_EVT_ID		1
#define MHI_1K_SIZE			0x1000
/* Per the updated spec, HW accelerated event rings span NER - 2 to NER - 1 */
#define MHI_HW_ACC_EVT_RING_START	2
#define MHI_HW_ACC_EVT_RING_END		1

#define MHI_HOST_REGION_NUM		2

#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK	0x1
#define MHI_MMIO_CTRL_CRDB_STATUS_MSK	0x2

#define HOST_ADDR(lsb, msb)	((lsb) | ((uint64_t)(msb) << 32))
#define HOST_ADDR_LSB(addr)	(addr & 0xFFFFFFFF)
#define HOST_ADDR_MSB(addr)	((addr >> 32) & 0xFFFFFFFF)

#define MHI_IPC_LOG_PAGES	(100)
#define MHI_REGLEN		0x100
#define MHI_INIT		0
#define MHI_REINIT		1

#define TR_RING_ELEMENT_SZ	sizeof(struct mhi_dev_transfer_ring_element)
#define RING_ELEMENT_TYPE_SZ	sizeof(union mhi_dev_ring_element_type)

enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
void *mhi_ipc_log;

static struct mhi_dev *mhi_ctx;
static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
	unsigned long data);
static void mhi_ring_init_cb(void *user_data);
static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info);
static int mhi_deinit(struct mhi_dev *mhi);
static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify);
static int mhi_dev_pcie_notify_event;
static void mhi_dev_transfer_completion_cb(void *mreq);
static struct mhi_dev_uevent_info channel_state_info[MHI_MAX_CHANNELS];

/*
 * mhi_dev_ring_cache_completion_cb() - Callback invoked by the IPA
 * driver when caching of a ring element is complete.
 *
 * @req : ring cache request
 */
static void mhi_dev_ring_cache_completion_cb(void *req)
{
	struct ring_cache_req *ring_req = NULL;

	if (req) {
		ring_req = (struct ring_cache_req *)req;
	} else {
		pr_err("%s(): ring cache req data is NULL\n", __func__);
		return;
	}
	complete(ring_req->done);
}

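/*
 * mhi_dev_read_from_host() - Copy data from host memory into a device
 * buffer over IPA DMA and block until the async copy completes. With
 * config_iatu the host address is translated through the mapped control
 * region; otherwise bit 40 is asserted on the host physical address.
 */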
void mhi_dev_read_from_host(struct mhi_dev *mhi, struct mhi_addr *transfer)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	struct ring_cache_req ring_req;

	DECLARE_COMPLETION(done);

	ring_req.done = &done;

	if (!mhi) {
		pr_err("invalid MHI ctx\n");
		return;
	}

	if (mhi->config_iatu) {
		offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset;
	} else {
		host_addr_pa = transfer->host_pa | bit_40;
	}

	mhi_log(MHI_MSG_VERBOSE,
		"device 0x%x <<-- host 0x%llx, size %d\n",
		transfer->phy_addr, host_addr_pa,
		(int) transfer->size);
	rc = ipa_dma_async_memcpy((u64)transfer->phy_addr, host_addr_pa,
		(int)transfer->size,
		mhi_dev_ring_cache_completion_cb, &ring_req);
	if (rc)
		pr_err("error while reading from host:%d\n", rc);

	wait_for_completion(&done);
}
EXPORT_SYMBOL(mhi_dev_read_from_host);

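/*
 * mhi_dev_write_to_host() - Copy a device buffer to host memory over
 * IPA DMA. MHI_DEV_DMA_SYNC copies through the local DMA cache and
 * blocks; MHI_DEV_DMA_ASYNC maps the buffer and completes through the
 * event request client callback.
 */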
void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *transfer,
		struct event_req *ereq, enum mhi_dev_transfer_type tr_type)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	dma_addr_t dma;

	if (!mhi) {
		pr_err("invalid MHI ctx\n");
		return;
	}
	if (mhi->config_iatu) {
		offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset;
	} else {
		host_addr_pa = transfer->host_pa | bit_40;
	}

	mhi_log(MHI_MSG_VERBOSE,
		"device 0x%llx --> host 0x%llx, size %d\n",
		(uint64_t) mhi->cache_dma_handle, host_addr_pa,
		(int) transfer->size);
	if (tr_type == MHI_DEV_DMA_ASYNC) {
		dma = dma_map_single(&mhi->pdev->dev,
			transfer->virt_addr, transfer->size,
			DMA_TO_DEVICE);
		if (ereq->event_type == SEND_EVENT_BUFFER) {
			ereq->dma = dma;
			ereq->dma_len = transfer->size;
		} else if (ereq->event_type == SEND_EVENT_RD_OFFSET) {
			ereq->event_rd_dma = dma;
		}
		rc = ipa_dma_async_memcpy(host_addr_pa, (uint64_t) dma,
			(int)transfer->size,
			ereq->client_cb, ereq);
		if (rc)
			pr_err("error while writing to host:%d\n", rc);
	} else if (tr_type == MHI_DEV_DMA_SYNC) {
		/* Copy the device content to a local device
		 * physical address.
		 */
		memcpy(mhi->dma_cache, transfer->virt_addr,
			transfer->size);
		rc = ipa_dma_sync_memcpy(host_addr_pa,
			(u64) mhi->cache_dma_handle,
			(int) transfer->size);
		if (rc)
			pr_err("error while writing to host:%d\n", rc);
	}
}
EXPORT_SYMBOL(mhi_dev_write_to_host);

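/*
 * mhi_transfer_host_to_device() - Read channel data from host memory
 * into a device buffer, either synchronously through the read handle or
 * asynchronously with a completion callback on the request.
 */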
int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *mreq)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	struct mhi_dev_ring *ring = NULL;

	if (!mhi || !dev || !host_pa || !mreq) {
		pr_err("%s(): Invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (mhi->config_iatu) {
		offset = (uint64_t)host_pa - mhi->data_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset;
	} else {
		host_addr_pa = host_pa | bit_40;
	}

	mhi_log(MHI_MSG_VERBOSE, "device 0x%llx <-- host 0x%llx, size %d\n",
		(uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len);

	if (mreq->mode == IPA_DMA_SYNC) {
		rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle,
				host_addr_pa, (int) len);
		if (rc) {
			pr_err("error while reading chan using sync:%d\n", rc);
			return rc;
		}
		memcpy(dev, mhi->read_handle, len);
	} else if (mreq->mode == IPA_DMA_ASYNC) {
		ring = mreq->client->channel->ring;
		mreq->dma = dma_map_single(&mhi->pdev->dev, dev, len,
				DMA_FROM_DEVICE);
		mhi_dev_ring_inc_index(ring, ring->rd_offset);

		if (ring->rd_offset == ring->wr_offset)
			mreq->snd_cmpl = 1;
		else
			mreq->snd_cmpl = 0;
		rc = ipa_dma_async_memcpy(mreq->dma, host_addr_pa,
				(int) len, mhi_dev_transfer_completion_cb,
				mreq);
		if (rc) {
			pr_err("error while reading chan using async:%d\n", rc);
			return rc;
		}
	}
	return rc;
}
EXPORT_SYMBOL(mhi_transfer_host_to_device);

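/*
 * mhi_transfer_device_to_host() - Write channel data from the device to
 * host memory, synchronously through the write handle or asynchronously
 * with a completion callback on the request.
 */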
int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *req)
{
	int rc = 0;
	uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0;
	struct mhi_dev_ring *ring = NULL;

	if (!mhi || !dev || !req || !host_addr) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	if (mhi->config_iatu) {
		offset = (uint64_t)host_addr - mhi->data_base.host_pa;
		/* Mapping the translated physical address on the device */
		host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset;
	} else {
		host_addr_pa = host_addr | bit_40;
	}
	mhi_log(MHI_MSG_VERBOSE, "device 0x%llx ---> host 0x%llx, size %d\n",
		(uint64_t) mhi->write_dma_handle,
		host_addr_pa, (int) len);

	if (req->mode == IPA_DMA_SYNC) {
		memcpy(mhi->write_handle, dev, len);
		rc = ipa_dma_sync_memcpy(host_addr_pa,
			(u64) mhi->write_dma_handle, (int) len);
	} else if (req->mode == IPA_DMA_ASYNC) {
		req->dma = dma_map_single(&mhi->pdev->dev, req->buf,
			req->len, DMA_TO_DEVICE);
		ring = req->client->channel->ring;
		mhi_dev_ring_inc_index(ring, ring->rd_offset);
		if (ring->rd_offset == ring->wr_offset)
			req->snd_cmpl = 1;
		rc = ipa_dma_async_memcpy(host_addr_pa,
			(uint64_t) req->dma, (int) len,
			mhi_dev_transfer_completion_cb, req);
	}
	return rc;
}
EXPORT_SYMBOL(mhi_transfer_device_to_host);

int mhi_dev_is_list_empty(void)
{
	/* Note: returns 0 when both lists are empty, 1 otherwise */
	if (list_empty(&mhi_ctx->event_ring_list) &&
			list_empty(&mhi_ctx->process_ring_list))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(mhi_dev_is_list_empty);

static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi,
				struct ep_pcie_db_config *erdb_cfg)
{
	switch (mhi->cfg.event_rings) {
	case NUM_CHANNELS:
		erdb_cfg->base = HW_CHANNEL_BASE;
		erdb_cfg->end = HW_CHANNEL_END;
		break;
	default:
		erdb_cfg->base = mhi->cfg.event_rings -
					MHI_HW_ACC_EVT_RING_START;
		erdb_cfg->end = mhi->cfg.event_rings -
					MHI_HW_ACC_EVT_RING_END;
		break;
	}
}

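/*
 * mhi_pcie_config_db_routing() - Route HW channel and event ring
 * doorbells to the IPA uC mailbox registers through the PCIe EP driver.
 */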
int mhi_pcie_config_db_routing(struct mhi_dev *mhi)
{
	int rc = 0;
	struct ep_pcie_db_config chdb_cfg, erdb_cfg;

	if (!mhi) {
		pr_err("Invalid MHI context\n");
		return -EINVAL;
	}

	/* Configure Doorbell routing */
	chdb_cfg.base = HW_CHANNEL_BASE;
	chdb_cfg.end = HW_CHANNEL_END;
	chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb;

	mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);

	mhi_log(MHI_MSG_VERBOSE,
		"Event rings 0x%x => er_base 0x%x, er_end %d\n",
		mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
	erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
	ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg);

	return rc;
}
EXPORT_SYMBOL(mhi_pcie_config_db_routing);

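/*
 * mhi_hwc_init() - Initialize the IPA HW accelerated channels with the
 * PCIe MSI configuration, doorbell routing and MMIO address, and
 * register mhi_hwc_cb() for IPA MHI events.
 */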
static int mhi_hwc_init(struct mhi_dev *mhi)
{
	int rc = 0;
	struct ep_pcie_msi_config cfg;
	struct ipa_mhi_init_params ipa_init_params;
	struct ep_pcie_db_config erdb_cfg;

	/* Call IPA HW_ACC Init with MSI Address and db routing info */
	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc) {
		pr_err("Error retrieving pcie msi logic\n");
		return rc;
	}

	rc = mhi_pcie_config_db_routing(mhi);
	if (rc) {
		pr_err("Error configuring DB routing\n");
		return rc;
	}

	mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
	mhi_log(MHI_MSG_VERBOSE,
		"Event rings 0x%x => er_base 0x%x, er_end %d\n",
		mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);

	erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
	memset(&ipa_init_params, 0, sizeof(ipa_init_params));
	ipa_init_params.msi.addr_hi = cfg.upper;
	ipa_init_params.msi.addr_low = cfg.lower;
	ipa_init_params.msi.data = cfg.data;
	ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
	ipa_init_params.first_er_idx = erdb_cfg.base;
	ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;

	if (mhi_ctx->config_iatu)
		ipa_init_params.mmio_addr =
			((uint32_t) mhi_ctx->mmio_base_pa_addr) + MHI_REGLEN;
	else
		ipa_init_params.mmio_addr =
			((uint32_t) mhi_ctx->mmio_base_pa_addr);

	if (!mhi_ctx->config_iatu)
		ipa_init_params.assert_bit40 = true;

	mhi_log(MHI_MSG_VERBOSE,
		"MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n",
		ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data);
	ipa_init_params.notify = mhi_hwc_cb;
	ipa_init_params.priv = mhi;

	rc = ipa_mhi_init(&ipa_init_params);
	if (rc) {
		pr_err("Error initializing IPA\n");
		return rc;
	}

	return rc;
}

static int mhi_hwc_start(struct mhi_dev *mhi)
{
	int rc = 0;
	struct ipa_mhi_start_params ipa_start_params;

	memset(&ipa_start_params, 0, sizeof(ipa_start_params));

	if (mhi->config_iatu) {
		ipa_start_params.host_ctrl_addr = mhi->ctrl_base.device_pa;
		ipa_start_params.host_data_addr = mhi->data_base.device_pa;
	} else {
		ipa_start_params.channel_context_array_addr =
				mhi->ch_ctx_shadow.host_pa;
		ipa_start_params.event_context_array_addr =
				mhi->ev_ctx_shadow.host_pa;
	}

	rc = ipa_mhi_start(&ipa_start_params);
	if (rc)
		pr_err("Error starting IPA (rc = 0x%X)\n", rc);

	return rc;
}

static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
	unsigned long data)
{
	int rc = 0;

	switch (event) {
	case IPA_MHI_EVENT_READY:
		mhi_log(MHI_MSG_INFO,
			"HW Channel uC is ready event=0x%X\n", event);
		rc = mhi_hwc_start(mhi_ctx);
		if (rc) {
			pr_err("hwc_init start failed with %d\n", rc);
			return;
		}

		rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx);
		if (rc) {
			pr_err("Failed to enable channel db\n");
			return;
		}

		rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx);
		if (rc) {
			pr_err("Failed to enable control interrupt\n");
			return;
		}

		rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx);
		if (rc) {
			pr_err("Failed to enable command db\n");
			return;
		}

		mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);

		ep_pcie_mask_irq_event(mhi_ctx->phandle,
				EP_PCIE_INT_EVT_MHI_A7, true);
		break;
	case IPA_MHI_EVENT_DATA_AVAILABLE:
		rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
		if (rc) {
			pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc);
			return;
		}
		break;
	default:
		pr_err("HW Channel uC unknown event 0x%X\n", event);
		break;
	}
}

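/*
 * mhi_hwc_chcmd() - Handle start/stop/reset commands for HW accelerated
 * channels by connecting or disconnecting the corresponding IPA pipe.
 */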
static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid,
				enum mhi_dev_ring_element_type_id type)
{
	int rc = 0;
	struct ipa_mhi_connect_params connect_params;

	memset(&connect_params, 0, sizeof(connect_params));

	switch (type) {
	case MHI_DEV_RING_EL_RESET:
	case MHI_DEV_RING_EL_STOP:
		rc = ipa_mhi_disconnect_pipe(
			mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
		if (rc)
			pr_err("Stopping HW Channel%d failed 0x%X\n",
							chid, rc);
		break;
	case MHI_DEV_RING_EL_START:
		connect_params.channel_id = chid;
		connect_params.sys.skip_ep_cfg = true;
		if ((chid % 2) == 0x0)
			connect_params.sys.client = IPA_CLIENT_MHI_PROD;
		else
			connect_params.sys.client = IPA_CLIENT_MHI_CONS;

		rc = ipa_mhi_connect_pipe(&connect_params,
			&mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
		if (rc)
			pr_err("HW Channel%d start failed 0x%X\n",
							chid, rc);
		break;
	case MHI_DEV_RING_EL_INVALID:
	default:
		pr_err("Invalid Ring Element type = 0x%X\n", type);
		break;
	}

	return rc;
}

static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev,
							uint32_t *int_value)
{
	int rc = 0;

	rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value);
	if (rc) {
		pr_err("Failed to read A7 status\n");
		return;
	}

	rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value);
	if (rc) {
		pr_err("Failed to clear A7 status\n");
		return;
	}
}

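/*
 * mhi_dev_fetch_ch_ctx() - Cache the channel context for @ch_id from
 * the host's channel context array into the local shadow copy.
 */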
static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id)
{
	struct mhi_addr data_transfer;

	if (mhi->use_ipa) {
		data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
			sizeof(struct mhi_dev_ch_ctx) * ch_id;
		data_transfer.phy_addr = mhi->ch_ctx_cache_dma_handle +
			sizeof(struct mhi_dev_ch_ctx) * ch_id;
	}

	data_transfer.size = sizeof(struct mhi_dev_ch_ctx);
	/* Fetch the channel ctx (*dst, *src, size) */
	mhi_dev_read_from_host(mhi, &data_transfer);
}

int mhi_dev_syserr(struct mhi_dev *mhi)
{
	if (!mhi) {
		pr_err("%s: Invalid MHI ctx\n", __func__);
		return -EINVAL;
	}

	mhi_dev_dump_mmio(mhi);
	pr_err("MHI dev sys error\n");

	return 0;
}
EXPORT_SYMBOL(mhi_dev_syserr);

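/*
 * mhi_dev_send_event() - Add an element to the given event ring, update
 * the ring read pointer in host memory and trigger the event ring's MSI
 * towards the host.
 */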
int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
				union mhi_dev_ring_element_type *el)
{
	int rc = 0;
	uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
	struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
	union mhi_dev_ring_ctx *ctx;
	struct ep_pcie_msi_config cfg;
	struct mhi_addr transfer_addr;

	rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
	if (rc) {
		pr_err("Error retrieving pcie msi logic\n");
		return rc;
	}

	if (evnt_ring_idx > mhi->cfg.event_rings) {
		pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
		return -EINVAL;
	}

	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
		rc = mhi_ring_start(ring, ctx, mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error starting event ring %d\n", evnt_ring);
			return rc;
		}
	}

	mutex_lock(&mhi->mhi_event_lock);
	/* add the ring element */
	mhi_dev_add_element(ring, el, NULL, 0);

	ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
		sizeof(union mhi_dev_ring_element_type)) +
		ring->ring_ctx->generic.rbase;

	mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
		ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);

	if (mhi->use_ipa)
		transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
			(uint32_t) ring->ring_ctx;
	else
		transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
			(uint32_t) ring->ring_ctx;

	transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
	transfer_addr.size = sizeof(uint64_t);

	mhi_dev_write_to_host(mhi, &transfer_addr, NULL, MHI_DEV_DMA_SYNC);
	/*
	 * rp update in host memory should be flushed
	 * before sending a MSI to the host
	 */
	wmb();

	mutex_unlock(&mhi->mhi_event_lock);
	mhi_log(MHI_MSG_VERBOSE, "event sent:\n");
	mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
	mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x\n", el->evt_tr_comp.len);
	mhi_log(MHI_MSG_VERBOSE, "evnt code :0x%x\n", el->evt_tr_comp.code);
	mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n", el->evt_tr_comp.type);
	mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
	rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
	if (rc) {
		pr_err("%s: error sending msi\n", __func__);
		return rc;
	}
	return rc;
}

/*
 * mhi_dev_event_buf_completion_cb() - Callback invoked by the IPA
 * driver when the transfer completion event buffer copy is done.
 *
 * @req - event_req structure
 */
static void mhi_dev_event_buf_completion_cb(void *req)
{
	struct event_req *ereq = NULL;

	if (req) {
		ereq = (struct event_req *)req;
	} else {
		pr_err("%s(): event req data is invalid\n", __func__);
		return;
	}
	dma_unmap_single(&mhi_ctx->pdev->dev, ereq->dma,
			ereq->dma_len, DMA_TO_DEVICE);
}

/**
 * mhi_dev_event_rd_offset_completion_cb() - Callback invoked by the IPA
 * driver when the event rd_offset transfer is done.
 *
 * @req - event_req structure
 */
static void mhi_dev_event_rd_offset_completion_cb(void *req)
{
	union mhi_dev_ring_ctx *ctx;
	int rc = 0;
	struct event_req *ereq = (struct event_req *)req;
	struct mhi_dev_channel *ch = ereq->context;
	struct mhi_dev *mhi = ch->ring->mhi_dev;
	unsigned long flags;

	dma_unmap_single(&mhi_ctx->pdev->dev, ereq->event_rd_dma,
			sizeof(uint64_t), DMA_TO_DEVICE);
	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring];
	rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
	if (rc)
		pr_err("%s: error sending msi\n", __func__);

	/* return the event req to the pre-allocated pooled list */
	spin_lock_irqsave(&mhi->lock, flags);
	list_add_tail(&ereq->list, &ch->event_req_buffers);
	spin_unlock_irqrestore(&mhi->lock, flags);
}

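/*
 * mhi_dev_send_multiple_tr_events() - Queue a batch of transfer
 * completion events on an event ring and write the updated ring read
 * pointer to the host asynchronously; the MSI is fired from the
 * rd_offset completion callback.
 */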
static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
		struct event_req *ereq, uint32_t evt_len)
{
	int rc = 0;
	uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
	struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
	union mhi_dev_ring_ctx *ctx;
	struct mhi_addr transfer_addr;
	static int count;

	if (!ereq) {
		pr_err("%s(): invalid event req\n", __func__);
		return -EINVAL;
	}

	if (count == 0) {
		rc = ep_pcie_get_msi_config(mhi->phandle, &mhi->msi_cfg);
		if (rc) {
			pr_err("Error retrieving pcie msi logic\n");
			return rc;
		}
		count++;
	}

	if (evnt_ring_idx > mhi->cfg.event_rings) {
		pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
		return -EINVAL;
	}

	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
		rc = mhi_ring_start(ring, ctx, mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error starting event ring %d\n", evnt_ring);
			return rc;
		}
	}

	/* add the ring element */
	ereq->client_cb = mhi_dev_event_buf_completion_cb;
	ereq->event_type = SEND_EVENT_BUFFER;
	rc = mhi_dev_add_element(ring, ereq->tr_events, ereq, evt_len);
	if (rc) {
		pr_err("%s(): error in adding element rc %d\n", __func__, rc);
		return rc;
	}
	ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
		sizeof(union mhi_dev_ring_element_type)) +
		ring->ring_ctx->generic.rbase;

	mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n",
		ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);

	if (mhi->use_ipa)
		transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp -
			(uint32_t)ring->ring_ctx;
	else
		transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va +
			sizeof(struct mhi_dev_ev_ctx) *
			evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp -
			(uint32_t)ring->ring_ctx;

	transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp;
	transfer_addr.size = sizeof(uint64_t);
	ereq->event_type = SEND_EVENT_RD_OFFSET;
	ereq->client_cb = mhi_dev_event_rd_offset_completion_cb;
	ereq->event_ring = evnt_ring;
	mhi_dev_write_to_host(mhi, &transfer_addr, ereq, MHI_DEV_DMA_ASYNC);
	return rc;
}

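/*
 * mhi_dev_send_completion_event() - Send a single transfer completion
 * event for the given channel read offset to the channel's event ring.
 */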
static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
			uint32_t rd_ofst, uint32_t len,
			enum mhi_dev_cmd_completion_code code)
{
	int rc = 0;
	union mhi_dev_ring_element_type compl_event;
	struct mhi_dev *mhi = ch->ring->mhi_dev;

	compl_event.evt_tr_comp.chid = ch->ch_id;
	compl_event.evt_tr_comp.type =
			MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
	compl_event.evt_tr_comp.len = len;
	compl_event.evt_tr_comp.code = code;
	compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
			rd_ofst * sizeof(struct mhi_dev_transfer_ring_element);

	rc = mhi_dev_send_event(mhi,
			mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event);

	return rc;
}

int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
						enum mhi_dev_state state)
{
	union mhi_dev_ring_element_type event;
	int rc = 0;

	event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG;
	event.evt_state_change.mhistate = state;

	rc = mhi_dev_send_event(mhi, 0, &event);
	if (rc) {
		pr_err("Sending state change event failed\n");
		return rc;
	}

	return rc;
}
EXPORT_SYMBOL(mhi_dev_send_state_change_event);

int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env)
{
	union mhi_dev_ring_element_type event;
	int rc = 0;

	event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY;
	event.evt_ee_state.execenv = exec_env;

	rc = mhi_dev_send_event(mhi, 0, &event);
	if (rc) {
		pr_err("Sending EE change event failed\n");
		return rc;
	}

	return rc;
}
EXPORT_SYMBOL(mhi_dev_send_ee_event);

static void mhi_dev_trigger_cb(enum mhi_client_channel ch_id)
{
	struct mhi_dev_ready_cb_info *info;
	enum mhi_ctrl_info state_data;

	list_for_each_entry(info, &mhi_ctx->client_cb_list, list)
		if (info->cb && info->cb_data.channel == ch_id) {
			mhi_ctrl_state_info(info->cb_data.channel, &state_data);
			info->cb_data.ctrl_info = state_data;
			info->cb(&info->cb_data);
		}
}

Siddartha Mohanadoss603f7652017-01-26 15:59:41 -0800840int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi)
841{
842 int rc = 0;
843
844 /*
845 * Expected usuage is when there is HW ACC traffic IPA uC notifes
846 * Q6 -> IPA A7 -> MHI core -> MHI SM
847 */
848 rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
849 if (rc) {
850 pr_err("error sending SM event\n");
851 return rc;
852 }
853
854 return rc;
855}
856EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup);
857
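/*
 * mhi_dev_send_cmd_comp_event() - Send a command completion event with
 * the given completion code, pointing at the current command ring read
 * offset.
 */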
static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi,
				enum mhi_dev_cmd_completion_code code)
{
	int rc = 0;
	union mhi_dev_ring_element_type event;

	if (code > MHI_CMD_COMPL_CODE_RES) {
		mhi_log(MHI_MSG_ERROR,
			"Invalid cmd compl code: %d\n", code);
		return -EINVAL;
	}

	/* send the command completion event to the host */
	event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase
			+ (mhi->ring[MHI_RING_CMD_ID].rd_offset *
			(sizeof(union mhi_dev_ring_element_type)));
	mhi_log(MHI_MSG_VERBOSE, "evt cmd comp ptr :%d\n",
			(uint32_t) event.evt_cmd_comp.ptr);
	event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
	event.evt_cmd_comp.code = code;
	rc = mhi_dev_send_event(mhi, 0, &event);
	if (rc)
		mhi_log(MHI_MSG_ERROR, "Send completion failed\n");

	return rc;
}

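/*
 * mhi_dev_process_stop_cmd() - Stop a channel if it has no pending
 * transactions: mark it stopped, update the channel state in host
 * memory and send a success command completion event.
 */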
static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
							struct mhi_dev *mhi)
{
	int rc = 0;
	struct mhi_addr data_transfer;

	if (ring->rd_offset != ring->wr_offset &&
		mhi->ch_ctx_cache[ch_id].ch_type ==
				MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
		mhi_log(MHI_MSG_INFO, "Pending outbound transaction\n");
		return 0;
	} else if (mhi->ch_ctx_cache[ch_id].ch_type ==
				MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
			mhi->ch[ch_id].wr_request_active) {
		mhi_log(MHI_MSG_INFO, "Pending inbound transaction\n");
		return 0;
	}

	/* set the channel to stop */
	mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
	mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;

	if (mhi->use_ipa) {
		data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
	} else {
		data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
	}
	data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
	data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;

	/* update the channel state in the host */
	mhi_dev_write_to_host(mhi, &data_transfer, NULL, MHI_DEV_DMA_SYNC);

	/* send the completion event to the host */
	rc = mhi_dev_send_cmd_comp_event(mhi,
				MHI_CMD_COMPL_CODE_SUCCESS);
	if (rc)
		pr_err("Error sending command completion event\n");

	return rc;
}

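/*
 * mhi_dev_process_cmd_ring() - Dispatch a command ring element: start,
 * stop or reset the addressed channel (via IPA for HW channels), update
 * the host-visible channel state, send the command completion event and
 * notify clients and user space of connect/disconnect.
 */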
static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi,
			union mhi_dev_ring_element_type *el, void *ctx)
{
	int rc = 0;
	uint32_t ch_id = 0;
	union mhi_dev_ring_element_type event;
	struct mhi_addr host_addr;
	struct mhi_dev_channel *ch;
	struct mhi_dev_ring *ring;
	char *connected[2] = { "MHI_CHANNEL_STATE_12=CONNECTED", NULL};
	char *disconnected[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL};

	ch_id = el->generic.chid;
	mhi_log(MHI_MSG_VERBOSE, "for channel:%d and cmd:%d\n",
		ch_id, el->generic.type);

	switch (el->generic.type) {
	case MHI_DEV_RING_EL_START:
		mhi_log(MHI_MSG_VERBOSE, "received start cmd for channel %d\n",
								ch_id);
		if (ch_id >= (HW_CHANNEL_BASE)) {
			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
			if (rc) {
				mhi_log(MHI_MSG_ERROR,
					"Error with HW channel cmd %d\n", rc);
				rc = mhi_dev_send_cmd_comp_event(mhi,
						MHI_CMD_COMPL_CODE_UNDEFINED);
				if (rc)
					mhi_log(MHI_MSG_ERROR,
						"Error with compl event\n");
				return;
			}
			goto send_start_completion_event;
		}

		/* fetch the channel context from host */
		mhi_dev_fetch_ch_ctx(mhi, ch_id);

		/* Initialize and configure the corresponding channel ring */
		rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id],
			(union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id],
			mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"start ring failed for ch %d\n", ch_id);
			rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_UNDEFINED);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"Error with compl event\n");
			return;
		}

		mhi->ring[mhi->ch_ring_start + ch_id].state =
						RING_STATE_PENDING;

		/* set the channel to running */
		mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
		mhi->ch[ch_id].state = MHI_DEV_CH_STARTED;
		mhi->ch[ch_id].ch_id = ch_id;
		mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id];
		mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type;

		/* enable DB for event ring */
		rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id);
		if (rc) {
			pr_err("Failed to enable channel db\n");
			rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_UNDEFINED);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"Error with compl event\n");
			return;
		}

		if (mhi->use_ipa)
			host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;
		else
			host_addr.device_va = mhi->ch_ctx_shadow.device_va +
				sizeof(struct mhi_dev_ch_ctx) * ch_id;

		host_addr.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
		host_addr.size = sizeof(enum mhi_dev_ch_ctx_state);

		mhi_dev_write_to_host(mhi, &host_addr, NULL, MHI_DEV_DMA_SYNC);

send_start_completion_event:
		rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_SUCCESS);
		if (rc)
			pr_err("Error sending command completion event\n");

		mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
		/* Trigger callback to clients */
		mhi_dev_trigger_cb(ch_id);
		if (ch_id == MHI_CLIENT_MBIM_OUT)
			kobject_uevent_env(&mhi_ctx->dev->kobj,
						KOBJ_CHANGE, connected);
		break;
	case MHI_DEV_RING_EL_STOP:
		if (ch_id >= HW_CHANNEL_BASE) {
			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"send channel stop cmd event failed\n");

			/* send the completion event to the host */
			event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
				(mhi->ring[MHI_RING_CMD_ID].rd_offset *
				(sizeof(union mhi_dev_ring_element_type)));
			event.evt_cmd_comp.type =
					MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
			if (rc == 0)
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_SUCCESS;
			else
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_UNDEFINED;

			rc = mhi_dev_send_event(mhi, 0, &event);
			if (rc) {
				pr_err("stop event send failed\n");
				return;
			}
		} else {
			/*
			 * Check if there are any pending transactions for the
			 * ring associated with the channel. If no, proceed to
			 * write disable the channel state else send stop
			 * channel command to check if one can suspend the
			 * command.
			 */
			ring = &mhi->ring[ch_id + mhi->ch_ring_start];
			if (ring->state == RING_STATE_UINT) {
				pr_err("Channel not opened for %d\n", ch_id);
				return;
			}

			ch = &mhi->ch[ch_id];

			mutex_lock(&ch->ch_lock);

			mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP;
			rc = mhi_dev_process_stop_cmd(
				&mhi->ring[mhi->ch_ring_start + ch_id],
				ch_id, mhi);
			if (rc)
				pr_err("stop event send failed\n");

			mutex_unlock(&ch->ch_lock);
			mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
			if (ch_id == MHI_CLIENT_MBIM_OUT)
				kobject_uevent_env(&mhi_ctx->dev->kobj,
						KOBJ_CHANGE, disconnected);
		}
		break;
	case MHI_DEV_RING_EL_RESET:
		mhi_log(MHI_MSG_VERBOSE,
			"received reset cmd for channel %d\n", ch_id);
		if (ch_id >= HW_CHANNEL_BASE) {
			rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"send channel stop cmd event failed\n");

			/* send the completion event to the host */
			event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
				(mhi->ring[MHI_RING_CMD_ID].rd_offset *
				(sizeof(union mhi_dev_ring_element_type)));
			event.evt_cmd_comp.type =
					MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
			if (rc == 0)
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_SUCCESS;
			else
				event.evt_cmd_comp.code =
					MHI_CMD_COMPL_CODE_UNDEFINED;

			rc = mhi_dev_send_event(mhi, 0, &event);
			if (rc) {
				pr_err("stop event send failed\n");
				return;
			}
		} else {
			ring = &mhi->ring[ch_id + mhi->ch_ring_start];
			if (ring->state == RING_STATE_UINT) {
				pr_err("Channel not opened for %d\n", ch_id);
				return;
			}

			ch = &mhi->ch[ch_id];

			mutex_lock(&ch->ch_lock);

			/* hard stop and set the channel to stop */
			mhi->ch_ctx_cache[ch_id].ch_state =
						MHI_DEV_CH_STATE_DISABLED;
			mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
			if (mhi->use_ipa)
				host_addr.host_pa =
					mhi->ch_ctx_shadow.host_pa +
					(sizeof(struct mhi_dev_ch_ctx) * ch_id);
			else
				host_addr.device_va =
					mhi->ch_ctx_shadow.device_va +
					(sizeof(struct mhi_dev_ch_ctx) * ch_id);

			host_addr.virt_addr =
					&mhi->ch_ctx_cache[ch_id].ch_state;
			host_addr.size = sizeof(enum mhi_dev_ch_ctx_state);

			/* update the channel state in the host */
			mhi_dev_write_to_host(mhi, &host_addr, NULL,
					MHI_DEV_DMA_SYNC);

			/* send the completion event to the host */
			rc = mhi_dev_send_cmd_comp_event(mhi,
					MHI_CMD_COMPL_CODE_SUCCESS);
			if (rc)
				pr_err("Error sending command completion event\n");
			mutex_unlock(&ch->ch_lock);
			mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
			if (ch_id == MHI_CLIENT_MBIM_OUT)
				kobject_uevent_env(&mhi_ctx->dev->kobj,
						KOBJ_CHANGE, disconnected);
		}
		break;
	default:
		pr_err("%s: Invalid command:%d\n", __func__, el->generic.type);
		break;
	}
}

static void mhi_dev_process_tre_ring(struct mhi_dev *mhi,
			union mhi_dev_ring_element_type *el, void *ctx)
{
	struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx;
	struct mhi_dev_channel *ch;
	struct mhi_dev_client_cb_reason reason;

	if (ring->id < mhi->ch_ring_start) {
		mhi_log(MHI_MSG_VERBOSE,
			"invalid channel ring id (%d), should be < %d\n",
			ring->id, mhi->ch_ring_start);
		return;
	}

	ch = &mhi->ch[ring->id - mhi->ch_ring_start];
	reason.ch_id = ch->ch_id;
	reason.reason = MHI_DEV_TRE_AVAILABLE;

	/* Invoke a callback to let the client know its data is ready.
	 * Copy this event to the client's context so that it can be
	 * sent out once the client has fetched the data. Update the rp
	 * before sending the data as part of the event completion.
	 */
	if (ch->active_client && ch->active_client->event_trigger != NULL)
		ch->active_client->event_trigger(&reason);
}

static void mhi_dev_process_ring_pending(struct work_struct *work)
{
	struct mhi_dev *mhi = container_of(work,
				struct mhi_dev, pending_work);
	struct list_head *cp, *q;
	struct mhi_dev_ring *ring;
	struct mhi_dev_channel *ch;
	int rc = 0;

	mutex_lock(&mhi_ctx->mhi_lock);
	rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "error processing command ring\n");
		goto exit;
	}

	list_for_each_safe(cp, q, &mhi->process_ring_list) {
		ring = list_entry(cp, struct mhi_dev_ring, list);
		list_del(cp);
		mhi_log(MHI_MSG_VERBOSE, "processing ring %d\n", ring->id);
		rc = mhi_dev_process_ring(ring);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error processing ring %d\n", ring->id);
			goto exit;
		}

		if (ring->id < mhi->ch_ring_start) {
			mhi_log(MHI_MSG_ERROR,
				"ring (%d) is not a channel ring\n", ring->id);
			goto exit;
		}

		ch = &mhi->ch[ring->id - mhi->ch_ring_start];
		rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
				"error enabling chdb interrupt for %d\n",
				ch->ch_id);
			goto exit;
		}
	}

exit:
	mutex_unlock(&mhi_ctx->mhi_lock);
}

static int mhi_dev_get_event_notify(enum mhi_dev_state state,
						enum mhi_dev_event *event)
{
	int rc = 0;

	switch (state) {
	case MHI_DEV_M0_STATE:
		*event = MHI_DEV_EVENT_M0_STATE;
		break;
	case MHI_DEV_M1_STATE:
		*event = MHI_DEV_EVENT_M1_STATE;
		break;
	case MHI_DEV_M2_STATE:
		*event = MHI_DEV_EVENT_M2_STATE;
		break;
	case MHI_DEV_M3_STATE:
		*event = MHI_DEV_EVENT_M3_STATE;
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

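/*
 * mhi_dev_queue_channel_db() - Walk the channel doorbell interrupt
 * bits, mark each rung ring pending, mask its doorbell and queue the
 * pending-ring worker.
 */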
static void mhi_dev_queue_channel_db(struct mhi_dev *mhi,
					uint32_t chintr_value, uint32_t ch_num)
{
	struct mhi_dev_ring *ring;
	int rc = 0;

	for (; chintr_value; ch_num++, chintr_value >>= 1) {
		if (chintr_value & 1) {
			ring = &mhi->ring[ch_num + mhi->ch_ring_start];
			if (ring->state == RING_STATE_UINT) {
				pr_debug("Channel not opened for %d\n", ch_num);
				break;
			}
			mhi_ring_set_state(ring, RING_STATE_PENDING);
			list_add(&ring->list, &mhi->process_ring_list);
			rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num);
			if (rc) {
				pr_err("Error disabling chdb\n");
				return;
			}
			queue_work(mhi->pending_ring_wq, &mhi->pending_work);
		}
	}
}

static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi)
{
	int i, rc = 0;
	uint32_t chintr_value = 0, ch_num = 0;

	rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi);
	if (rc) {
		pr_err("Failed to read channel db status\n");
		return;
	}

	for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
		ch_num = i * MHI_MASK_CH_EV_LEN;
		/* Process channel status whose mask is enabled */
		chintr_value = (mhi->chdb[i].status & mhi->chdb[i].mask);
		if (chintr_value) {
			mhi_log(MHI_MSG_VERBOSE,
				"processing id: %d, ch interrupt 0x%x\n",
				i, chintr_value);
			mhi_dev_queue_channel_db(mhi, chintr_value, ch_num);
			rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i),
							mhi->chdb[i].status);
			if (rc) {
				pr_err("Error writing interrupt clear for A7\n");
				return;
			}
		}
	}
}

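/*
 * mhi_dev_abort() - Handle an MHI reset from the host: hard stop all
 * open channels, notify clients and user space of the disconnect, tear
 * down IPA and local MHI state, and re-register for PCIe link-up so the
 * stack can be re-initialized.
 */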
static int mhi_dev_abort(struct mhi_dev *mhi)
{
	struct mhi_dev_channel *ch;
	struct mhi_dev_ring *ring;
	int ch_id = 0, rc = 0;
	char *disconnected_12[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL};
	char *disconnected_14[2] = { "MHI_CHANNEL_STATE_14=DISCONNECTED", NULL};

	/* Hard stop all the channels */
	for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
		ring = &mhi->ring[ch_id + mhi->ch_ring_start];
		if (ring->state == RING_STATE_UINT)
			continue;

		ch = &mhi->ch[ch_id];
		mutex_lock(&ch->ch_lock);
		mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED;
		mutex_unlock(&ch->ch_lock);
	}

	/* Update ctrl node */
	mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_DISCONNECTED);
	mhi_update_state_info(MHI_CLIENT_MBIM_OUT, MHI_STATE_DISCONNECTED);
	mhi_update_state_info(MHI_CLIENT_QMI_OUT, MHI_STATE_DISCONNECTED);
	rc = kobject_uevent_env(&mhi_ctx->dev->kobj,
				KOBJ_CHANGE, disconnected_12);
	if (rc)
		pr_err("Error sending uevent:%d\n", rc);

	rc = kobject_uevent_env(&mhi_ctx->dev->kobj,
				KOBJ_CHANGE, disconnected_14);
	if (rc)
		pr_err("Error sending uevent:%d\n", rc);

	flush_workqueue(mhi->ring_init_wq);
	flush_workqueue(mhi->pending_ring_wq);

	/* Initiate MHI IPA reset */
	ipa_mhi_destroy();

	/* Clean up initialized channels */
	rc = mhi_deinit(mhi);
	if (rc) {
		pr_err("Error during mhi_deinit with %d\n", rc);
		return rc;
	}

	rc = mhi_dev_mmio_mask_chdb_interrupts(mhi_ctx);
	if (rc) {
		pr_err("Failed to mask channel db interrupts\n");
		return rc;
	}

	rc = mhi_dev_mmio_disable_ctrl_interrupt(mhi_ctx);
	if (rc) {
		pr_err("Failed to disable control interrupt\n");
		return rc;
	}

	rc = mhi_dev_mmio_disable_cmdb_interrupt(mhi_ctx);
	if (rc) {
		pr_err("Failed to disable command db interrupt\n");
		return rc;
	}

	atomic_set(&mhi_ctx->re_init_done, 0);

	mhi_log(MHI_MSG_INFO,
		"Register a PCIe callback during re-init\n");
	mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP;
	mhi_ctx->event_reg.user = mhi_ctx;
	mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
	mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up;
	mhi_ctx->event_reg.options = MHI_REINIT;

	rc = ep_pcie_register_event(mhi_ctx->phandle,
					&mhi_ctx->event_reg);
	if (rc) {
		pr_err("Failed to register for events from PCIe\n");
		return rc;
	}

	/* Set RESET field to 0 */
	mhi_dev_mmio_reset(mhi_ctx);

	return rc;
}

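/*
 * mhi_dev_transfer_completion_cb() - DMA completion handler for channel
 * transfers: unmap the buffer, invoke the client callback, batch a
 * transfer completion event, flush the batch when it is full or a
 * completion was requested, then finish any pending channel stop.
 */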
static void mhi_dev_transfer_completion_cb(void *mreq)
{
	struct mhi_dev_channel *ch;
	struct mhi_dev_client *client;
	union mhi_dev_ring_element_type *el;
	int rc = 0;
	struct mhi_req *req = (struct mhi_req *)mreq;
	union mhi_dev_ring_element_type *compl_ev = NULL;
	struct mhi_dev *mhi = NULL;
	unsigned long flags;
	size_t transfer_len;
	u32 snd_cmpl;
	uint32_t rd_offset;

	client = req->client;
	ch = client->channel;
	mhi = ch->ring->mhi_dev;
	el = req->el;
	transfer_len = req->len;
	snd_cmpl = req->snd_cmpl;
	rd_offset = req->rd_offset;
	ch->curr_ereq->context = ch;

	dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
			req->len, DMA_FROM_DEVICE);

	/* Trigger client call back */
	req->client_cb(req);

	if (el->tre.ieot) {
		compl_ev = ch->curr_ereq->tr_events + ch->curr_ereq->num_events;
		compl_ev->evt_tr_comp.chid = ch->ch_id;
		compl_ev->evt_tr_comp.type =
				MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
		compl_ev->evt_tr_comp.len = transfer_len;
		compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
		compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
				rd_offset * TR_RING_ELEMENT_SZ;
		ch->curr_ereq->num_events++;

		if (ch->curr_ereq->num_events >= MAX_TR_EVENTS || snd_cmpl) {
			mhi_log(MHI_MSG_VERBOSE,
				"num of tr events %d for ch %d\n",
				ch->curr_ereq->num_events, ch->ch_id);
			rc = mhi_dev_send_multiple_tr_events(mhi,
				mhi->ch_ctx_cache[ch->ch_id].err_indx,
				ch->curr_ereq, (ch->curr_ereq->num_events *
				sizeof(union mhi_dev_ring_element_type)));
			if (rc)
				mhi_log(MHI_MSG_ERROR,
					"failed to send compl evts\n");
			if (!list_empty(&ch->event_req_buffers)) {
				ch->curr_ereq =
					container_of(ch->event_req_buffers.next,
							struct event_req, list);
				spin_lock_irqsave(&mhi->lock, flags);
				list_del_init(&ch->curr_ereq->list);
				spin_unlock_irqrestore(&mhi->lock, flags);
				ch->curr_ereq->num_events = 0;
			} else
				pr_err("%s evt req buffers empty\n", __func__);
		}
	} else
		mhi_log(MHI_MSG_ERROR, "ieot is not valid\n");

	if (ch->state == MHI_DEV_CH_PENDING_STOP) {
		ch->state = MHI_DEV_CH_STOPPED;
		rc = mhi_dev_process_stop_cmd(ch->ring, ch->ch_id, mhi_ctx);
		if (rc)
			mhi_log(MHI_MSG_ERROR,
				"Error while stopping channel (%d)\n",
				ch->ch_id);
	}
}

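/*
 * mhi_dev_scheduler() - Bottom half for the MHI interrupt: ack control
 * interrupts, handle MHI state changes and device reset, queue command
 * ring processing, dispatch channel doorbells and re-enable the IRQ.
 */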
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001487static void mhi_dev_scheduler(struct work_struct *work)
1488{
1489 struct mhi_dev *mhi = container_of(work,
1490 struct mhi_dev, chdb_ctrl_work);
1491 int rc = 0;
1492 uint32_t int_value = 0;
1493 struct mhi_dev_ring *ring;
1494 enum mhi_dev_state state;
1495 enum mhi_dev_event event = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001496 bool mhi_reset = false;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001497
1498 mutex_lock(&mhi_ctx->mhi_lock);
1499 /* Check for interrupts */
1500 mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value);
1501
1502 if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001503 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001504 "processing ctrl interrupt with %d\n", int_value);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001505 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001506 if (rc) {
1507 pr_err("%s: get mhi state failed\n", __func__);
1508 mutex_unlock(&mhi_ctx->mhi_lock);
1509 return;
1510 }
1511
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001512 if (mhi_reset) {
1513 mhi_log(MHI_MSG_VERBOSE,
1514 "processing mhi device reset\n");
1515 rc = mhi_dev_abort(mhi);
1516 if (rc)
1517 pr_err("device reset failed:%d\n", rc);
1518 mutex_unlock(&mhi_ctx->mhi_lock);
1519 queue_work(mhi->ring_init_wq, &mhi->re_init);
1520 return;
1521 }
1522
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001523 rc = mhi_dev_get_event_notify(state, &event);
1524 if (rc) {
1525 pr_err("unsupported state :%d\n", state);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001526 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001527 }
1528
1529 rc = mhi_dev_notify_sm_event(event);
1530 if (rc) {
1531 pr_err("error sending SM event\n");
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001532 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001533 }
1534 }
1535
1536 if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001537 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001538 "processing cmd db interrupt with %d\n", int_value);
1539 ring = &mhi->ring[MHI_RING_CMD_ID];
1540 ring->state = RING_STATE_PENDING;
1541 queue_work(mhi->pending_ring_wq, &mhi->pending_work);
1542 }
1543
1544 /* get the specific channel interrupts */
1545 mhi_dev_check_channel_interrupt(mhi);
1546
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001547fail:
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001548 mutex_unlock(&mhi_ctx->mhi_lock);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001549
1550 if (mhi->config_iatu || mhi->mhi_int)
1551 enable_irq(mhi->mhi_irq);
1552 else
1553 ep_pcie_mask_irq_event(mhi->phandle,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001554 EP_PCIE_INT_EVT_MHI_A7, true);
1555}
1556
1557void mhi_dev_notify_a7_event(struct mhi_dev *mhi)
1558{
1560 if (!atomic_read(&mhi->mhi_dev_wake)) {
1561 pm_stay_awake(mhi->dev);
1562 atomic_set(&mhi->mhi_dev_wake, 1);
1563 }
1564 mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock\n");
1565
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001566 schedule_work(&mhi->chdb_ctrl_work);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001567 mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001568}
1569EXPORT_SYMBOL(mhi_dev_notify_a7_event);
1570
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001571static irqreturn_t mhi_dev_isr(int irq, void *dev_id)
1572{
1573 struct mhi_dev *mhi = dev_id;
1574
Rama Krishna Phani A62cedde2018-06-11 19:57:00 +05301575 if (!atomic_read(&mhi->mhi_dev_wake)) {
1576 pm_stay_awake(mhi->dev);
1577 atomic_set(&mhi->mhi_dev_wake, 1);
1578 mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock in ISR\n");
1579 }
1580
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001581 disable_irq_nosync(mhi->mhi_irq);
1582 schedule_work(&mhi->chdb_ctrl_work);
1583 mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n");
1584
1585 return IRQ_HANDLED;
1586}
1587
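/*
 * Map the host control and data regions through the endpoint PCIe
 * outbound iATU so that device-physical accesses reach host memory.
 * Both windows are programmed in a single ep_pcie_config_outbound_iatu()
 * call, conceptually:
 *
 *	device [data_base.device_pa .. +size-1] -> host data_base.host_pa
 *	device [ctrl_base.device_pa .. +size-1] -> host ctrl_base.host_pa
 */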
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001588int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi)
1589{
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001590 struct ep_pcie_iatu control, data;
1591 int rc = 0;
1592 struct ep_pcie_iatu entries[MHI_HOST_REGION_NUM];
1593
1594 data.start = mhi->data_base.device_pa;
1595 data.end = mhi->data_base.device_pa + mhi->data_base.size - 1;
1596 data.tgt_lower = HOST_ADDR_LSB(mhi->data_base.host_pa);
1597 data.tgt_upper = HOST_ADDR_MSB(mhi->data_base.host_pa);
1598
1599 control.start = mhi->ctrl_base.device_pa;
1600 control.end = mhi->ctrl_base.device_pa + mhi->ctrl_base.size - 1;
1601 control.tgt_lower = HOST_ADDR_LSB(mhi->ctrl_base.host_pa);
1602 control.tgt_upper = HOST_ADDR_MSB(mhi->ctrl_base.host_pa);
1603
1604 entries[0] = data;
1605 entries[1] = control;
1606
1607 rc = ep_pcie_config_outbound_iatu(mhi_ctx->phandle, entries,
1608 MHI_HOST_REGION_NUM);
1609 if (rc) {
1610 pr_err("error configure iATU\n");
1611 return rc;
1612 }
1613
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001614 return 0;
1615}
1616EXPORT_SYMBOL(mhi_dev_config_outbound_iatu);
1617
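/*
 * Cache the host-side MHI configuration: read the control/data region
 * addresses from MMIO, optionally program the outbound iATU, fetch the
 * channel, event and command context base pointers, then pull local
 * coherent copies of the command and event context arrays from host
 * memory before starting the command ring.
 */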
1618static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
1619{
1620 int rc = 0;
1621 struct platform_device *pdev;
1622 uint64_t addr1 = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001623 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001624
1625 pdev = mhi->pdev;
1626
1627 /* Get host memory region configuration */
1628 mhi_dev_get_mhi_addr(mhi);
1629
1630 mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb,
1631 mhi->host_addr.ctrl_base_msb);
1632 mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb,
1633 mhi->host_addr.data_base_msb);
1634
1635 addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb,
1636 mhi->host_addr.ctrl_limit_msb);
1637 mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa;
1638 addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb,
1639 mhi->host_addr.data_limit_msb);
1640 mhi->data_base.size = addr1 - mhi->data_base.host_pa;
1641
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001642 if (mhi->config_iatu) {
1643 if (mhi->ctrl_base.host_pa > mhi->data_base.host_pa) {
1644 mhi->data_base.device_pa = mhi->device_local_pa_base;
1645 mhi->ctrl_base.device_pa = mhi->device_local_pa_base +
1646 mhi->ctrl_base.host_pa - mhi->data_base.host_pa;
1647 } else {
1648 mhi->ctrl_base.device_pa = mhi->device_local_pa_base;
1649 mhi->data_base.device_pa = mhi->device_local_pa_base +
1650 mhi->data_base.host_pa - mhi->ctrl_base.host_pa;
1651 }
1652
1653 if (!mhi->use_ipa) {
1654 mhi->ctrl_base.device_va =
1655 (uintptr_t) devm_ioremap_nocache(&pdev->dev,
1656 mhi->ctrl_base.device_pa,
1657 mhi->ctrl_base.size);
1658 if (!mhi->ctrl_base.device_va) {
1659 pr_err("io remap failed for mhi address\n");
1660 return -EINVAL;
1661 }
1662 }
1663 }
1664
1665 if (mhi->config_iatu) {
1666 rc = mhi_dev_config_outbound_iatu(mhi);
1667 if (rc) {
1668 pr_err("Configuring iATU failed\n");
1669 return rc;
1670 }
1671 }
1672
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001673 /* Get Channel, event and command context base pointer */
1674 rc = mhi_dev_mmio_get_chc_base(mhi);
1675 if (rc) {
1676 pr_err("Fetching channel context failed\n");
1677 return rc;
1678 }
1679
1680 rc = mhi_dev_mmio_get_erc_base(mhi);
1681 if (rc) {
1682 pr_err("Fetching event ring context failed\n");
1683 return rc;
1684 }
1685
1686 rc = mhi_dev_mmio_get_crc_base(mhi);
1687 if (rc) {
1688 pr_err("Fetching command ring context failed\n");
1689 return rc;
1690 }
1691
1692 rc = mhi_dev_update_ner(mhi);
1693 if (rc) {
1694 pr_err("Fetching NER failed\n");
1695 return rc;
1696 }
1697
1698 mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx);
1699 mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) *
1700 mhi->cfg.event_rings;
1701 mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
1702 mhi->cfg.channels;
1703
1704 mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
1705 sizeof(struct mhi_dev_cmd_ctx),
1706 &mhi->cmd_ctx_cache_dma_handle,
1707 GFP_KERNEL);
1708 if (!mhi->cmd_ctx_cache) {
1709 pr_err("no memory while allocating cmd ctx\n");
1710 return -ENOMEM;
1711 }
1712 memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx));
1713
1714 mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
1715 sizeof(struct mhi_dev_ev_ctx) *
1716 mhi->cfg.event_rings,
1717 &mhi->ev_ctx_cache_dma_handle,
1718 GFP_KERNEL);
1719 if (!mhi->ev_ctx_cache)
1720 return -ENOMEM;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001721 memset(mhi->ev_ctx_cache, 0, sizeof(struct mhi_dev_ev_ctx) *
1722 mhi->cfg.event_rings);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001723
1724 mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
1725 sizeof(struct mhi_dev_ch_ctx) *
1726 mhi->cfg.channels,
1727 &mhi->ch_ctx_cache_dma_handle,
1728 GFP_KERNEL);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001729 if (!mhi->ch_ctx_cache)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001730 return -ENOMEM;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001731 memset(mhi->ch_ctx_cache, 0, sizeof(struct mhi_dev_ch_ctx) *
1732 mhi->cfg.channels);
1733
1734 if (mhi->use_ipa) {
1735 data_transfer.phy_addr = mhi->cmd_ctx_cache_dma_handle;
1736 data_transfer.host_pa = mhi->cmd_ctx_shadow.host_pa;
1737 }
1738
1739 data_transfer.size = mhi->cmd_ctx_shadow.size;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001740
1741 /* Cache the command and event context */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001742 mhi_dev_read_from_host(mhi, &data_transfer);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001743
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001744 if (mhi->use_ipa) {
1745 data_transfer.phy_addr = mhi->ev_ctx_cache_dma_handle;
1746 data_transfer.host_pa = mhi->ev_ctx_shadow.host_pa;
1747 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001748
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001749 data_transfer.size = mhi->ev_ctx_shadow.size;
1750
1751 mhi_dev_read_from_host(mhi, &data_transfer);
1752
1753 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001754 "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
1755 mhi->cmd_ctx_cache->rbase,
1756 mhi->cmd_ctx_cache->rp,
1757 mhi->cmd_ctx_cache->wp);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001758 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001759 "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
1760 mhi_ctx->ev_ctx_cache->rbase,
1761 mhi->ev_ctx_cache->rp,
1762 mhi->ev_ctx_cache->wp);
1763
1764 rc = mhi_ring_start(&mhi->ring[0],
1765 (union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
1766 if (rc) {
1767 pr_err("error in ring start\n");
1768 return rc;
1769 }
1770
1771 return 0;
1772}
1773
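/*
 * Move every RUNNING channel to SUSPENDED and mirror the new ch_state
 * into the host's channel context array (one synchronous write per
 * channel), then drop the MHI wakelock. mhi_dev_resume() below performs
 * the inverse transition.
 */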
1774int mhi_dev_suspend(struct mhi_dev *mhi)
1775{
1776 int ch_id = 0, rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001777 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001778
1779 mutex_lock(&mhi_ctx->mhi_write_test);
1780 atomic_set(&mhi->is_suspended, 1);
1781
1782 for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
1783 if (mhi->ch_ctx_cache[ch_id].ch_state !=
1784 MHI_DEV_CH_STATE_RUNNING)
1785 continue;
1786
1787 mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED;
1788
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001789 if (mhi->use_ipa) {
1790 data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001791 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001792 } else {
1793 data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
1794 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1795 data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
1796 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1797 }
1798
1799 data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
1800 data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001801
1802 /* update the channel state in the host */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001803 mhi_dev_write_to_host(mhi, &data_transfer, NULL,
1804 MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001805
1806 }
1807
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001808 atomic_set(&mhi->mhi_dev_wake, 0);
1809 pm_relax(mhi->dev);
1810 mhi_log(MHI_MSG_VERBOSE, "releasing mhi wakelock\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001811
1812 mutex_unlock(&mhi_ctx->mhi_write_test);
1813
1814 return rc;
1815}
1816EXPORT_SYMBOL(mhi_dev_suspend);
1817
1818int mhi_dev_resume(struct mhi_dev *mhi)
1819{
1820 int ch_id = 0, rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001821 struct mhi_addr data_transfer;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001822
1823 for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
1824 if (mhi->ch_ctx_cache[ch_id].ch_state !=
1825 MHI_DEV_CH_STATE_SUSPENDED)
1826 continue;
1827
1828 mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001829 if (mhi->use_ipa) {
1830 data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa +
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001831 sizeof(struct mhi_dev_ch_ctx) * ch_id;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001832 } else {
1833 data_transfer.device_va = mhi->ch_ctx_shadow.device_va +
1834 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1835 data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa +
1836 sizeof(struct mhi_dev_ch_ctx) * ch_id;
1837 }
1838
1839 data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state);
1840 data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001841
1842 /* update the channel state in the host */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001843 mhi_dev_write_to_host(mhi, &data_transfer, NULL,
1844 MHI_DEV_DMA_SYNC);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001845 }
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07001846 mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001847
1848 atomic_set(&mhi->is_suspended, 0);
1849
1850 return rc;
1851}
1852EXPORT_SYMBOL(mhi_dev_resume);
1853
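/*
 * Ring bookkeeping uses a single flat array, laid out as:
 *
 *	ring[0]                                - command ring
 *	ring[1 .. cfg.event_rings]             - event rings
 *	ring[ch_ring_start .. +cfg.channels-1] - channel (transfer) rings
 *
 * where ch_ring_start = 1 + cfg.event_rings, matching the
 * "channels + event_rings + 1" allocation done in mhi_init().
 */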
1854static int mhi_dev_ring_init(struct mhi_dev *dev)
1855{
1856 int i = 0;
1857
1858 mhi_log(MHI_MSG_INFO, "initializing all rings");
1859 dev->cmd_ring_idx = 0;
1860 dev->ev_ring_start = 1;
1861 dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings;
1862
1863 /* Initialize CMD ring */
1864 mhi_ring_init(&dev->ring[dev->cmd_ring_idx],
1865 RING_TYPE_CMD, dev->cmd_ring_idx);
1866
1867 mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx],
1868 mhi_dev_process_cmd_ring);
1869
1870 /* Initialize Event ring */
1871 for (i = dev->ev_ring_start; i < (dev->cfg.event_rings
1872 + dev->ev_ring_start); i++)
1873 mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i);
1874
1875 /* Initialize CH */
1876 for (i = dev->ch_ring_start; i < (dev->cfg.channels
1877 + dev->ch_ring_start); i++) {
1878 mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i);
1879 mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring);
1880 }
1881
1883 return 0;
1884}
1885
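/*
 * Illustrative client usage (a sketch; "my_cb" and the channel id are
 * example names, not fixed by this driver):
 *
 *	static void my_cb(struct mhi_dev_client_cb_reason *reason)
 *	{
 *		react to the reason reported for the channel
 *	}
 *
 *	struct mhi_dev_client *client;
 *	int rc = mhi_dev_open_channel(MHI_CLIENT_IP_SW_4_OUT, &client,
 *			my_cb);
 *	if (!rc)
 *		the channel can be used with mhi_dev_read_channel() and
 *		mhi_dev_write_channel() until mhi_dev_close_channel()
 */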
1886int mhi_dev_open_channel(uint32_t chan_id,
1887 struct mhi_dev_client **handle_client,
1888 void (*mhi_dev_client_cb_reason)
1889 (struct mhi_dev_client_cb_reason *cb))
1890{
1891 int rc = 0;
1892 struct mhi_dev_channel *ch;
1893 struct platform_device *pdev;
1894
1895 pdev = mhi_ctx->pdev;
1896 ch = &mhi_ctx->ch[chan_id];
1897
1898 mutex_lock(&ch->ch_lock);
1899
1900 if (ch->active_client) {
1901 mhi_log(MHI_MSG_ERROR,
1902 "Channel (%d) already opened by client\n", chan_id);
1903 rc = -EINVAL;
1904 goto exit;
1905 }
1906
1907 /* Initialize the channel, client and state information */
1908 *handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL);
1909 if (!(*handle_client)) {
		dev_err(&pdev->dev, "cannot allocate mhi_dev client memory\n");
1911 rc = -ENOMEM;
1912 goto exit;
1913 }
1914
1915 ch->active_client = (*handle_client);
1916 (*handle_client)->channel = ch;
1917 (*handle_client)->event_trigger = mhi_dev_client_cb_reason;
1918
1919 if (ch->state == MHI_DEV_CH_UNINT) {
1920 ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
1921 ch->state = MHI_DEV_CH_PENDING_START;
1922 } else if (ch->state == MHI_DEV_CH_CLOSED)
1923 ch->state = MHI_DEV_CH_STARTED;
1924 else if (ch->state == MHI_DEV_CH_STOPPED)
1925 ch->state = MHI_DEV_CH_PENDING_START;
1926
1927exit:
1928 mutex_unlock(&ch->ch_lock);
1929 return rc;
1930}
1931EXPORT_SYMBOL(mhi_dev_open_channel);
1932
1933int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
1934{
1935 struct mhi_dev_channel *ch;
1936 int rc;
1937
1938 ch = handle->channel;
1939
	rc = (ch->ring->rd_offset == ch->ring->wr_offset);
1941
1942 return rc;
1943}
1944EXPORT_SYMBOL(mhi_dev_channel_isempty);
1945
1946int mhi_dev_close_channel(struct mhi_dev_client *handle)
1947{
1948 struct mhi_dev_channel *ch;
1949 int rc = 0;
1950
1951 ch = handle->channel;
1952
1953 mutex_lock(&ch->ch_lock);
1954 if (ch->state != MHI_DEV_CH_PENDING_START) {
1955 if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
1956 !mhi_dev_channel_isempty(handle)) {
1957 mhi_log(MHI_MSG_ERROR,
1958 "Trying to close an active channel (%d)\n",
1959 ch->ch_id);
1960 mutex_unlock(&ch->ch_lock);
1961 rc = -EAGAIN;
1962 goto exit;
1963 } else if (ch->tre_loc) {
1964 mhi_log(MHI_MSG_ERROR,
1965 "Trying to close channel (%d) when a TRE is active",
1966 ch->ch_id);
1967 mutex_unlock(&ch->ch_lock);
1968 rc = -EAGAIN;
1969 goto exit;
1970 }
1971 }
1972
1973 ch->state = MHI_DEV_CH_CLOSED;
1974 ch->active_client = NULL;
1975 kfree(handle);
1976exit:
1977 mutex_unlock(&ch->ch_lock);
1978 return rc;
1979}
1980EXPORT_SYMBOL(mhi_dev_close_channel);
1981
1982static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch,
1983 struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el,
1984 uint32_t *chain)
1985{
1986 uint32_t td_done = 0;
1987
1988 /*
1989 * A full TRE worth of data was consumed.
1990 * Check if we are at a TD boundary.
1991 */
1992 if (ch->tre_bytes_left == 0) {
1993 if (el->tre.chain) {
1994 if (el->tre.ieob)
1995 mhi_dev_send_completion_event(ch,
1996 ring->rd_offset, el->tre.len,
1997 MHI_CMD_COMPL_CODE_EOB);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08001998 *chain = 1;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08001999 } else {
2000 if (el->tre.ieot)
2001 mhi_dev_send_completion_event(
2002 ch, ring->rd_offset, el->tre.len,
2003 MHI_CMD_COMPL_CODE_EOT);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002004 td_done = 1;
2005 *chain = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002006 }
2007 mhi_dev_ring_inc_index(ring, ring->rd_offset);
2008 ch->tre_bytes_left = 0;
2009 ch->tre_loc = 0;
2010 }
2011
2012 return td_done;
2013}
2014
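/*
 * Read data sent by the host on an outbound channel: walk the cached
 * TRE ring from rd_offset, DMA each chunk from the host TRE buffer into
 * mreq->buf via mhi_transfer_host_to_device(), and stop at a TD
 * boundary or when the caller's buffer is full. In IPA_DMA_ASYNC mode
 * the function returns after queuing one transfer and the completion
 * event is generated later from the transfer completion callback; in
 * sync mode completion events are generated inline by
 * mhi_dev_check_tre_bytes_left().
 */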
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002015int mhi_dev_read_channel(struct mhi_req *mreq)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002016{
2017 struct mhi_dev_channel *ch;
2018 struct mhi_dev_ring *ring;
2019 union mhi_dev_ring_element_type *el;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002020 size_t bytes_to_read, addr_offset;
2021 uint64_t read_from_loc;
2022 ssize_t bytes_read = 0;
	uintptr_t write_to_loc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002024 size_t usr_buf_remaining;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002025 int td_done = 0, rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002026 struct mhi_dev_client *handle_client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002027
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002028 if (!mreq) {
2029 mhi_log(MHI_MSG_ERROR, "invalid mhi request\n");
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002030 return -ENXIO;
2031 }
2032
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002033 if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
2034 pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info);
2035 return -ENODEV;
2036 }
2037
2038 if (!mreq->client) {
2039 mhi_log(MHI_MSG_ERROR, "invalid mhi request\n");
2040 return -ENXIO;
2041 }
2042 handle_client = mreq->client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002043 ch = handle_client->channel;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002044 usr_buf_remaining = mreq->len;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002045 ring = ch->ring;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002046 mreq->chain = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002047
2048 mutex_lock(&ch->ch_lock);
2049
2050 do {
2051 el = &ring->ring_cache[ring->rd_offset];
		mhi_log(MHI_MSG_VERBOSE, "TRE data ptr: 0x%llx\n",
2053 el->tre.data_buf_ptr);
2054 mhi_log(MHI_MSG_VERBOSE, "evntlen : 0x%x, offset:%d\n",
2055 el->tre.len, ring->rd_offset);
2056
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002057 if (ch->tre_loc) {
2058 bytes_to_read = min(usr_buf_remaining,
2059 ch->tre_bytes_left);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002060 mreq->chain = 1;
2061 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002062 "remaining buffered data size %d\n",
2063 (int) ch->tre_bytes_left);
2064 } else {
2065 if (ring->rd_offset == ring->wr_offset) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002066 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002067 "nothing to read, returning\n");
2068 bytes_read = 0;
2069 goto exit;
2070 }
2071
2072 if (ch->state == MHI_DEV_CH_STOPPED) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002073 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002074 "channel (%d) already stopped\n",
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002075 mreq->chan);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002076 bytes_read = -1;
2077 goto exit;
2078 }
2079
2080 ch->tre_loc = el->tre.data_buf_ptr;
2081 ch->tre_size = el->tre.len;
2082 ch->tre_bytes_left = ch->tre_size;
2083
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002084 mhi_log(MHI_MSG_VERBOSE,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002085 "user_buf_remaining %d, ch->tre_size %d\n",
2086 usr_buf_remaining, ch->tre_size);
2087 bytes_to_read = min(usr_buf_remaining, ch->tre_size);
2088 }
2089
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002090 bytes_read += bytes_to_read;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002091 addr_offset = ch->tre_size - ch->tre_bytes_left;
2092 read_from_loc = ch->tre_loc + addr_offset;
		write_to_loc = (uintptr_t) mreq->buf +
2094 (mreq->len - usr_buf_remaining);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002095 ch->tre_bytes_left -= bytes_to_read;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002096 mreq->el = el;
2097 mreq->actual_len = bytes_read;
2098 mreq->rd_offset = ring->rd_offset;
2099 mhi_log(MHI_MSG_VERBOSE, "reading %d bytes from chan %d\n",
2100 bytes_to_read, mreq->chan);
2101 rc = mhi_transfer_host_to_device((void *) write_to_loc,
2102 read_from_loc, bytes_to_read, mhi_ctx, mreq);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002103 if (rc) {
2104 mhi_log(MHI_MSG_ERROR,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002105 "Error while reading chan (%d) rc %d\n",
2106 mreq->chan, rc);
2107 mutex_unlock(&ch->ch_lock);
2108 return rc;
2109 }
2110 usr_buf_remaining -= bytes_to_read;
2111
2112 if (mreq->mode == IPA_DMA_ASYNC) {
2113 ch->tre_bytes_left = 0;
2114 ch->tre_loc = 0;
2115 goto exit;
2116 } else {
2117 td_done = mhi_dev_check_tre_bytes_left(ch, ring,
2118 el, &mreq->chain);
2119 }
2120 } while (usr_buf_remaining && !td_done);
2121 if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) {
2122 ch->state = MHI_DEV_CH_STOPPED;
2123 rc = mhi_dev_process_stop_cmd(ring, mreq->chan, mhi_ctx);
2124 if (rc) {
2125 mhi_log(MHI_MSG_ERROR,
2126 "Error while stopping channel (%d)\n",
2127 mreq->chan);
2128 bytes_read = -EIO;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002129 }
2130 }
2131exit:
2132 mutex_unlock(&ch->ch_lock);
2133 return bytes_read;
2134}
2135EXPORT_SYMBOL(mhi_dev_read_channel);
2136
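/*
 * A transfer descriptor (TD) is a run of TREs linked by the chain bit
 * and terminated by the first TRE with chain == 0. Skip the remainder
 * of the current TD so the next transfer starts on a TD boundary.
 */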
2137static void skip_to_next_td(struct mhi_dev_channel *ch)
2138{
2139 struct mhi_dev_ring *ring = ch->ring;
2140 union mhi_dev_ring_element_type *el;
2141 uint32_t td_boundary_reached = 0;
2142
2143 ch->skip_td = 1;
2144 el = &ring->ring_cache[ring->rd_offset];
2145 while (ring->rd_offset != ring->wr_offset) {
2146 if (td_boundary_reached) {
2147 ch->skip_td = 0;
2148 break;
2149 }
2150 if (!el->tre.chain)
2151 td_boundary_reached = 1;
2152 mhi_dev_ring_inc_index(ring, ring->rd_offset);
2153 el = &ring->ring_cache[ring->rd_offset];
2154 }
2155}
2156
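/*
 * Write device data toward the host on an inbound channel. If the core
 * is suspended, this first requests a wakeup through the state machine
 * and polls for up to MHI_WAKEUP_TIMEOUT_CNT * MHI_SUSPEND_MIN ms for
 * the resume to complete. Each TRE is filled via
 * mhi_transfer_device_to_host(); the completion code is EOT when the
 * client buffer is exhausted, OVERFLOW when data remains but the TRE is
 * not chained, and EOB when data remains on a chained TRE with IEOB set.
 */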
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002157int mhi_dev_write_channel(struct mhi_req *wreq)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002158{
2159 struct mhi_dev_channel *ch;
2160 struct mhi_dev_ring *ring;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002161 struct mhi_dev_client *handle_client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002162 union mhi_dev_ring_element_type *el;
2163 enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
2164 int rc = 0;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002165 uint64_t skip_tres = 0, write_to_loc;
	uintptr_t read_from_loc;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002167 size_t usr_buf_remaining;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002168 size_t usr_buf_offset = 0;
2169 size_t bytes_to_write = 0;
2170 size_t bytes_written = 0;
2171 uint32_t tre_len = 0, suspend_wait_timeout = 0;
2172
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002173 if (!wreq || !wreq->client || !wreq->buf) {
2174 pr_err("%s: invalid parameters\n", __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002175 return -ENXIO;
2176 }
2177
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002178 if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
2179 pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info);
2180 return -ENODEV;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002181 }
2182
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002183 usr_buf_remaining = wreq->len;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002184 mutex_lock(&mhi_ctx->mhi_write_test);
2185
2186 if (atomic_read(&mhi_ctx->is_suspended)) {
2187 /*
2188 * Expected usage is when there is a write
2189 * to the MHI core -> notify SM.
2190 */
2191 rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP);
2192 if (rc) {
2193 pr_err("error sending core wakeup event\n");
2194 mutex_unlock(&mhi_ctx->mhi_write_test);
2195 return rc;
2196 }
2197 }
2198
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002199 while (atomic_read(&mhi_ctx->is_suspended) &&
Siddartha Mohanadoss1a1d8f02018-04-02 19:52:35 -07002200 suspend_wait_timeout < MHI_WAKEUP_TIMEOUT_CNT) {
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002201 /* wait for the suspend to finish */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002202 msleep(MHI_SUSPEND_MIN);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002203 suspend_wait_timeout++;
2204 }
Siddartha Mohanadoss1a1d8f02018-04-02 19:52:35 -07002205
2206 if (suspend_wait_timeout >= MHI_WAKEUP_TIMEOUT_CNT ||
2207 mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) {
2208 pr_err("Failed to wake up core\n");
2209 mutex_unlock(&mhi_ctx->mhi_write_test);
2210 return -ENODEV;
2211 }
2212
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002213 handle_client = wreq->client;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002214 ch = handle_client->channel;
2215 ch->wr_request_active = true;
2216
2217 ring = ch->ring;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002218
2219 mutex_lock(&ch->ch_lock);
2220
2221 if (ch->state == MHI_DEV_CH_STOPPED) {
2222 mhi_log(MHI_MSG_ERROR,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002223 "channel %d already stopped\n", wreq->chan);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002224 bytes_written = -1;
2225 goto exit;
2226 }
2227
2228 if (ch->state == MHI_DEV_CH_PENDING_STOP) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002229 if (mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx) < 0)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002230 bytes_written = -1;
2231 goto exit;
2232 }
2233
2234 if (ch->skip_td)
2235 skip_to_next_td(ch);
2236
2237 do {
2238 if (ring->rd_offset == ring->wr_offset) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002239 mhi_log(MHI_MSG_ERROR,
2240 "%s():rd & wr offsets are equal\n",
2241 __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002242 mhi_log(MHI_MSG_INFO, "No TREs available\n");
2243 break;
2244 }
2245
2246 el = &ring->ring_cache[ring->rd_offset];
2247 tre_len = el->tre.len;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002248 if (wreq->len > tre_len) {
2249 pr_err("%s(): rlen = %d, tlen = %d: client buf > tre len\n",
2250 __func__, wreq->len, tre_len);
2251 bytes_written = -ENOMEM;
2252 goto exit;
2253 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002254
2255 bytes_to_write = min(usr_buf_remaining, tre_len);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002256 usr_buf_offset = wreq->len - bytes_to_write;
		read_from_loc = (uintptr_t) wreq->buf + usr_buf_offset;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002258 write_to_loc = el->tre.data_buf_ptr;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002259 wreq->rd_offset = ring->rd_offset;
2260 wreq->el = el;
2261 rc = mhi_transfer_device_to_host(write_to_loc,
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002262 (void *) read_from_loc,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002263 bytes_to_write,
2264 mhi_ctx, wreq);
2265 if (rc) {
2266 mhi_log(MHI_MSG_ERROR,
2267 "Error while writing chan (%d) rc %d\n",
2268 wreq->chan, rc);
2269 goto exit;
2270 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002271 bytes_written += bytes_to_write;
2272 usr_buf_remaining -= bytes_to_write;
2273
2274 if (usr_buf_remaining) {
2275 if (!el->tre.chain)
2276 code = MHI_CMD_COMPL_CODE_OVERFLOW;
2277 else if (el->tre.ieob)
2278 code = MHI_CMD_COMPL_CODE_EOB;
2279 } else {
2280 if (el->tre.chain)
2281 skip_tres = 1;
2282 code = MHI_CMD_COMPL_CODE_EOT;
2283 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002284 if (wreq->mode == IPA_DMA_SYNC) {
2285 rc = mhi_dev_send_completion_event(ch,
2286 ring->rd_offset, bytes_to_write, code);
2287 if (rc)
2288 mhi_log(MHI_MSG_VERBOSE,
2289 "err in snding cmpl evt ch:%d\n",
2290 wreq->chan);
2291 mhi_dev_ring_inc_index(ring, ring->rd_offset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002292 }
2293
2294 if (ch->state == MHI_DEV_CH_PENDING_STOP)
2295 break;
2296
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002297 } while (!skip_tres && usr_buf_remaining);
2298
2299 if (skip_tres)
2300 skip_to_next_td(ch);
2301
2302 if (ch->state == MHI_DEV_CH_PENDING_STOP) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002303 rc = mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002304 if (rc) {
2305 mhi_log(MHI_MSG_ERROR,
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002306 "channel %d stop failed\n", wreq->chan);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002307 }
2308 }
2309exit:
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002310 ch->wr_request_active = false;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002311 mutex_unlock(&ch->ch_lock);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002312 mutex_unlock(&mhi_ctx->mhi_write_test);
2313 return bytes_written;
2314}
2315EXPORT_SYMBOL(mhi_dev_write_channel);
2316
2317static void mhi_dev_enable(struct work_struct *work)
2318{
2319 int rc = 0;
2320 struct ep_pcie_msi_config msi_cfg;
2321 struct mhi_dev *mhi = container_of(work,
2322 struct mhi_dev, ring_init_cb_work);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002323 bool mhi_reset;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002324 enum mhi_dev_state state;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002325 uint32_t max_cnt = 0, bhi_intvec = 0;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002326
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002327 if (mhi->use_ipa) {
2328 rc = ipa_dma_init();
2329 if (rc) {
2330 pr_err("ipa dma init failed\n");
2331 return;
2332 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002333
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002334 rc = ipa_dma_enable();
2335 if (rc) {
2336 pr_err("ipa enable failed\n");
2337 return;
2338 }
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002339 }
2340
2341 rc = mhi_dev_ring_init(mhi);
2342 if (rc) {
2343 pr_err("MHI dev ring init failed\n");
2344 return;
2345 }
2346
	/* Enable the MHI dev network stack interface */
2348 rc = mhi_dev_net_interface_init();
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002349 if (rc)
		pr_err("%s: Failed to initialize mhi_dev_net iface\n", __func__);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002351
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002352 rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
2353 if (rc)
2354 return;
2355
2356 if (bhi_intvec != 0xffffffff) {
2357 /* Indicate the host that the device is ready */
2358 rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg);
2359 if (!rc) {
2360 rc = ep_pcie_trigger_msi(mhi_ctx->phandle, bhi_intvec);
2361 if (rc) {
2362 pr_err("%s: error sending msi\n", __func__);
2363 return;
2364 }
2365 } else {
2366 pr_err("MHI: error geting msi configs\n");
2367 }
2368 }
2369
2370 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002371 if (rc) {
2372 pr_err("%s: get mhi state failed\n", __func__);
2373 return;
2374 }
2375
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002376 while (state != MHI_DEV_M0_STATE && max_cnt < MHI_SUSPEND_TIMEOUT) {
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002377 /* Wait for Host to set the M0 state */
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002378 msleep(MHI_SUSPEND_MIN);
2379 rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002380 if (rc) {
2381 pr_err("%s: get mhi state failed\n", __func__);
2382 return;
2383 }
2384 max_cnt++;
2385 }
2386
2387 mhi_log(MHI_MSG_INFO, "state:%d\n", state);
2388
2389 if (state == MHI_DEV_M0_STATE) {
2390 rc = mhi_dev_cache_host_cfg(mhi);
2391 if (rc) {
2392 pr_err("Failed to cache the host config\n");
2393 return;
2394 }
2395
2396 rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
2397 if (rc) {
2398 pr_err("%s: env setting failed\n", __func__);
2399 return;
2400 }
2401 } else {
2402 pr_err("MHI device failed to enter M0\n");
2403 return;
2404 }
2405
2406 rc = mhi_hwc_init(mhi_ctx);
2407 if (rc) {
2408 pr_err("error during hwc_init\n");
2409 return;
2410 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002411
Rama Krishna Phani A69494ec2018-06-05 19:15:56 +05302412 if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
2413 mhi_ctx->mhi_int_en = true;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002414 enable_irq(mhi_ctx->mhi_irq);
Rama Krishna Phani A69494ec2018-06-05 19:15:56 +05302415 }
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002416
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002417 mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002418}
2419
2420static void mhi_ring_init_cb(void *data)
2421{
2422 struct mhi_dev *mhi = data;
2423
2424 if (!mhi) {
2425 pr_err("Invalid MHI ctx\n");
2426 return;
2427 }
2428
2429 queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work);
2430}
2431
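/*
 * Illustrative registration (a sketch; "my_state_cb" and "my_priv" are
 * example names, not part of this driver):
 *
 *	static void my_state_cb(struct mhi_dev_client_cb_data *cb_data)
 *	{
 *		inspect cb_data->channel and cb_data->user_data
 *	}
 *
 *	rc = mhi_register_state_cb(my_state_cb, my_priv,
 *			MHI_CLIENT_QMI_OUT);
 *	if (rc == -EEXIST)
 *		the channel is already started; no callback will follow
 */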
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002432int mhi_register_state_cb(void (*mhi_state_cb)
2433 (struct mhi_dev_client_cb_data *cb_data),
2434 void *data, enum mhi_client_channel channel)
2435{
2436 struct mhi_dev_ready_cb_info *cb_info = NULL;
2437
2438 if (!mhi_ctx) {
2439 pr_err("MHI device not ready\n");
2440 return -ENXIO;
2441 }
2442
	if (channel >= MHI_MAX_CHANNELS) {
2444 pr_err("Invalid channel :%d\n", channel);
2445 return -EINVAL;
2446 }
2447
2448 mutex_lock(&mhi_ctx->mhi_lock);
2449 cb_info = kmalloc(sizeof(struct mhi_dev_ready_cb_info), GFP_KERNEL);
2450 if (!cb_info) {
2451 mutex_unlock(&mhi_ctx->mhi_lock);
2452 return -ENOMEM;
2453 }
2454
2455 cb_info->cb = mhi_state_cb;
2456 cb_info->cb_data.user_data = data;
2457 cb_info->cb_data.channel = channel;
2458
2459 list_add_tail(&cb_info->list, &mhi_ctx->client_cb_list);
2460
2461 /**
2462 * If channel is open during registration, no callback is issued.
	 * Instead return -EEXIST to notify the client. The client's request
	 * is still added to the list for future state change notifications.
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05302465 * Channel struct may not be allocated yet if this function is called
2466 * early during boot - add an explicit check for non-null "ch".
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002467 */
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05302468 if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002469 mutex_unlock(&mhi_ctx->mhi_lock);
2470 return -EEXIST;
2471 }
2472
2473 mutex_unlock(&mhi_ctx->mhi_lock);
2474
2475 return 0;
2476}
2477EXPORT_SYMBOL(mhi_register_state_cb);
2478
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002479static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info)
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002480{
2481 struct mhi_dev_client_cb_reason reason;
2482
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002483 if (uevent_idx == MHI_DEV_UEVENT_CTRL)
2484 mhi_ctx->ctrl_info = info;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002485
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002486 channel_state_info[uevent_idx].ctrl_info = info;
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002487
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002488 if (uevent_idx == MHI_CLIENT_QMI_OUT ||
2489 uevent_idx == MHI_CLIENT_QMI_IN) {
2490 /* For legacy reasons for QTI client */
2491 reason.reason = MHI_DEV_CTRL_UPDATE;
2492 uci_ctrl_update(&reason);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002493 }
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07002495}
2496
2497int mhi_ctrl_state_info(uint32_t idx, uint32_t *info)
2498{
	if (idx == MHI_DEV_UEVENT_CTRL)
		*info = mhi_ctx->ctrl_info;
	else if (idx < MHI_MAX_CHANNELS)
		*info = channel_state_info[idx].ctrl_info;
	else
		return -EINVAL;
2503
2504 mhi_log(MHI_MSG_VERBOSE, "idx:%d, ctrl:%d", idx, *info);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002505
2506 return 0;
2507}
2508EXPORT_SYMBOL(mhi_ctrl_state_info);
2509
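/*
 * Expected devicetree shape (a sketch; values elided, property and
 * resource names match what is parsed below):
 *
 *	mhi-dev {
 *		compatible = "qcom,msm-mhi-dev";
 *		reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb",
 *				"ipa_uc_mbox_erdb";
 *		qcom,mhi-ifc-id = <...>;
 *		qcom,mhi-ep-msi = <...>;
 *		qcom,mhi-version = <...>;
 *		qcom,use-ipa-software-channel;
 *		qcom,mhi-config-iatu;
 *		qcom,mhi-local-pa-base = <...>;  (required with iatu)
 *		qcom,mhi-interrupt;
 *		interrupt-names = "mhi-device-inta";
 *	};
 */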
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002510static int get_device_tree_data(struct platform_device *pdev)
2511{
2512 struct mhi_dev *mhi;
2513 int rc = 0;
2514 struct resource *res_mem = NULL;
2515
2516 mhi = devm_kzalloc(&pdev->dev,
2517 sizeof(struct mhi_dev), GFP_KERNEL);
2518 if (!mhi)
2519 return -ENOMEM;
2520
2521 mhi->pdev = pdev;
2522 mhi->dev = &pdev->dev;
2523 res_mem = platform_get_resource_byname(pdev,
2524 IORESOURCE_MEM, "mhi_mmio_base");
2525 if (!res_mem) {
2526 rc = -EINVAL;
2527 pr_err("Request MHI MMIO physical memory region failed\n");
2528 return rc;
2529 }
2530
2531 mhi->mmio_base_pa_addr = res_mem->start;
2532 mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
2533 if (!mhi->mmio_base_addr) {
2534 pr_err("Failed to IO map MMIO registers.\n");
2535 rc = -EINVAL;
2536 return rc;
2537 }
2538
2539 res_mem = platform_get_resource_byname(pdev,
2540 IORESOURCE_MEM, "ipa_uc_mbox_crdb");
2541 if (!res_mem) {
2542 rc = -EINVAL;
2543 pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
2544 return rc;
2545 }
2546
2547 mhi->ipa_uc_mbox_crdb = res_mem->start;
2548
2549 res_mem = platform_get_resource_byname(pdev,
2550 IORESOURCE_MEM, "ipa_uc_mbox_erdb");
2551 if (!res_mem) {
2552 rc = -EINVAL;
2553 pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
2554 return rc;
2555 }
2556
2557 mhi->ipa_uc_mbox_erdb = res_mem->start;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002558 mhi_ctx = mhi;
2559
2560 rc = of_property_read_u32((&pdev->dev)->of_node,
2561 "qcom,mhi-ifc-id",
2562 &mhi_ctx->ifc_id);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002563 if (rc) {
2564 pr_err("qcom,mhi-ifc-id does not exist.\n");
2565 return rc;
2566 }
2567
2568 rc = of_property_read_u32((&pdev->dev)->of_node,
2569 "qcom,mhi-ep-msi",
2570 &mhi_ctx->mhi_ep_msi_num);
2571 if (rc) {
2572 pr_err("qcom,mhi-ep-msi does not exist.\n");
2573 return rc;
2574 }
2575
2576 rc = of_property_read_u32((&pdev->dev)->of_node,
2577 "qcom,mhi-version",
2578 &mhi_ctx->mhi_version);
2579 if (rc) {
2580 pr_err("qcom,mhi-version does not exist.\n");
2581 return rc;
2582 }
2583
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002584 mhi_ctx->use_ipa = of_property_read_bool((&pdev->dev)->of_node,
2585 "qcom,use-ipa-software-channel");
2586
2587 mhi_ctx->config_iatu = of_property_read_bool((&pdev->dev)->of_node,
2588 "qcom,mhi-config-iatu");
2589
2590 if (mhi_ctx->config_iatu) {
2591 rc = of_property_read_u32((&pdev->dev)->of_node,
2592 "qcom,mhi-local-pa-base",
2593 &mhi_ctx->device_local_pa_base);
2594 if (rc) {
2595 pr_err("qcom,mhi-local-pa-base does not exist\n");
2596 return rc;
2597 }
2598 }
2599
2600 mhi_ctx->mhi_int = of_property_read_bool((&pdev->dev)->of_node,
2601 "qcom,mhi-interrupt");
2602
2603 if (mhi->config_iatu || mhi_ctx->mhi_int) {
2604 mhi->mhi_irq = platform_get_irq_byname(pdev, "mhi-device-inta");
2605 if (mhi->mhi_irq < 0) {
2606 pr_err("Invalid MHI device interrupt\n");
2607 rc = mhi->mhi_irq;
2608 return rc;
2609 }
2610 }
2611
2612 device_init_wakeup(mhi->dev, true);
2613 /* MHI device will be woken up from PCIe event */
2614 device_set_wakeup_capable(mhi->dev, false);
2615 /* Hold a wakelock until completion of M0 */
2616 pm_stay_awake(mhi->dev);
2617 atomic_set(&mhi->mhi_dev_wake, 1);
2618
2619 mhi_log(MHI_MSG_VERBOSE, "acquiring wakelock\n");
2620
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002621 return 0;
2622}
2623
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002624static int mhi_deinit(struct mhi_dev *mhi)
2625{
2626 int rc = 0, i = 0, ring_id = 0;
2627 struct mhi_dev_ring *ring;
2628 struct platform_device *pdev = mhi->pdev;
2629
2630 ring_id = mhi->cfg.channels + mhi->cfg.event_rings + 1;
2631
2632 for (i = 0; i < ring_id; i++) {
2633 ring = &mhi->ring[i];
2634 if (ring->state == RING_STATE_UINT)
2635 continue;
2636
2637 dma_free_coherent(mhi->dev, ring->ring_size *
2638 sizeof(union mhi_dev_ring_element_type),
2639 ring->ring_cache,
2640 ring->ring_cache_dma_handle);
2641 }
2642
2643 for (i = 0; i < mhi->cfg.channels; i++)
2644 mutex_destroy(&mhi->ch[i].ch_lock);
2645
2646 devm_kfree(&pdev->dev, mhi->mmio_backup);
2647 devm_kfree(&pdev->dev, mhi->ch);
2648 devm_kfree(&pdev->dev, mhi->ring);
2649
2650 mhi_dev_sm_exit(mhi);
2651
2652 mhi->mmio_initialized = false;
2653
2654 return rc;
2655}
2656
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002657static int mhi_init(struct mhi_dev *mhi)
2658{
2659 int rc = 0, i = 0;
2660 struct platform_device *pdev = mhi->pdev;
2661
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002662 rc = mhi_dev_mmio_init(mhi);
2663 if (rc) {
2664 pr_err("Failed to update the MMIO init\n");
2665 return rc;
2666 }
2667
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002668 mhi->ring = devm_kzalloc(&pdev->dev,
2669 (sizeof(struct mhi_dev_ring) *
2670 (mhi->cfg.channels + mhi->cfg.event_rings + 1)),
2671 GFP_KERNEL);
2672 if (!mhi->ring)
2673 return -ENOMEM;
2674
2675 mhi->ch = devm_kzalloc(&pdev->dev,
2676 (sizeof(struct mhi_dev_channel) *
2677 (mhi->cfg.channels)), GFP_KERNEL);
2678 if (!mhi->ch)
2679 return -ENOMEM;
2680
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002682 for (i = 0; i < mhi->cfg.channels; i++) {
2683 mutex_init(&mhi->ch[i].ch_lock);
2684 if (i == MHI_CLIENT_IP_SW_4_OUT || i == MHI_CLIENT_IP_SW_4_IN) {
2685 int nreq = 0;
2686
2687 INIT_LIST_HEAD(&mhi->ch[i].event_req_buffers);
2688 while (nreq < MHI_MAX_EVT_REQ) {
2689 struct event_req *ereq;
2690 /* Pre allocate event requests */
2691 ereq = kzalloc(sizeof(struct event_req),
2692 GFP_KERNEL);
2693 if (!ereq)
2694 return -ENOMEM;
2695
2696 /* pre allocate buffers to queue
2697 * transfer completion events
2698 */
2699 ereq->tr_events = kzalloc(RING_ELEMENT_TYPE_SZ*
2700 MAX_TR_EVENTS, GFP_KERNEL);
2701 if (!ereq->tr_events) {
2702 kfree(ereq);
2703 return -ENOMEM;
2704 }
2705 list_add_tail(&ereq->list,
2706 &mhi->ch[i].event_req_buffers);
2707 nreq++;
2708 }
2709 mhi->ch[i].curr_ereq =
2710 container_of(mhi->ch[i].event_req_buffers.next,
2711 struct event_req, list);
2712 list_del_init(&mhi->ch[i].curr_ereq->list);
2713 }
2714 }
2715
2716 spin_lock_init(&mhi->lock);
2717 mhi->mmio_backup = devm_kzalloc(&pdev->dev,
2718 MHI_DEV_MMIO_RANGE, GFP_KERNEL);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002719 if (!mhi->mmio_backup)
2720 return -ENOMEM;
2721
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002722 return 0;
2723}
2724
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002725static int mhi_dev_resume_mmio_mhi_reinit(struct mhi_dev *mhi_ctx)
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002726{
2727 int rc = 0;
2728
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002729 mutex_lock(&mhi_ctx->mhi_lock);
2730 if (atomic_read(&mhi_ctx->re_init_done)) {
2731 mhi_log(MHI_MSG_INFO, "Re_init done, return\n");
2732 mutex_unlock(&mhi_ctx->mhi_lock);
2733 return 0;
2734 }
2735
2736 rc = mhi_init(mhi_ctx);
2737 if (rc) {
2738 pr_err("Error initializing MHI MMIO with %d\n", rc);
2739 goto fail;
2740 }
2741
2742 mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
2743 EP_PCIE_EVENT_PM_D3_COLD |
2744 EP_PCIE_EVENT_PM_D0 |
2745 EP_PCIE_EVENT_PM_RST_DEAST |
2746 EP_PCIE_EVENT_MHI_A7 |
2747 EP_PCIE_EVENT_LINKDOWN;
2748 mhi_ctx->event_reg.user = mhi_ctx;
2749 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
2750 mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
2751
2752 rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
2753 if (rc) {
2754 pr_err("Failed to register for events from PCIe\n");
2755 goto fail;
2756 }
2757
2758 rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
2759 if (rc < 0) {
2760 if (rc == -EEXIST) {
2761 mhi_ring_init_cb(mhi_ctx);
2762 } else {
2763 pr_err("Error calling IPA cb with %d\n", rc);
2764 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002765 }
2766 }
2767
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002768 /* Invoke MHI SM when device is in RESET state */
2769 rc = mhi_dev_sm_init(mhi_ctx);
2770 if (rc) {
2771 pr_err("%s: Error during SM init\n", __func__);
2772 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002773 }
2774
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002775 /* set the env before setting the ready bit */
2776 rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE);
2777 if (rc) {
2778 pr_err("%s: env setting failed\n", __func__);
2779 goto fail;
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002780 }
2781
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002782 /* All set, notify the host */
2783 rc = mhi_dev_sm_set_ready();
2784 if (rc) {
2785 pr_err("%s: unable to set ready bit\n", __func__);
2786 goto fail;
2787 }
2788
2789 atomic_set(&mhi_ctx->is_suspended, 0);
2790fail:
2791 atomic_set(&mhi_ctx->re_init_done, 1);
2792 mutex_unlock(&mhi_ctx->mhi_lock);
2793 return rc;
2794}
2795
2796static void mhi_dev_reinit(struct work_struct *work)
2797{
2798 struct mhi_dev *mhi_ctx = container_of(work,
2799 struct mhi_dev, re_init);
2800 enum ep_pcie_link_status link_state;
2801 int rc = 0;
2802
2803 link_state = ep_pcie_get_linkstatus(mhi_ctx->phandle);
2804 if (link_state == EP_PCIE_LINK_ENABLED) {
2805 /* PCIe link is up with BME set */
2806 rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx);
2807 if (rc) {
2808 pr_err("Failed to register for events from PCIe\n");
2809 return;
2810 }
2811 }
2812
2813 mhi_log(MHI_MSG_VERBOSE, "Wait for PCIe linkup\n");
2814}
2815
2816static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx)
2817{
2818 struct platform_device *pdev;
2819 int rc = 0;
2820
2821 pdev = mhi_ctx->pdev;
2822
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002823 INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);
2824
2825 mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq",
2826 WQ_HIGHPRI, 0);
2827 if (!mhi_ctx->pending_ring_wq) {
2828 rc = -ENOMEM;
2829 return rc;
2830 }
2831
2832 INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending);
2833
2834 INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable);
2835
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002836 INIT_WORK(&mhi_ctx->re_init, mhi_dev_reinit);
2837
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002838 mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq",
2839 WQ_HIGHPRI, 0);
2840 if (!mhi_ctx->ring_init_wq) {
2841 rc = -ENOMEM;
2842 return rc;
2843 }
2844
2845 INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
2846 INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002847 mutex_init(&mhi_ctx->mhi_event_lock);
2848 mutex_init(&mhi_ctx->mhi_write_test);
2849
2850 rc = mhi_init(mhi_ctx);
2851 if (rc)
2852 return rc;
2853
2854 mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev,
2855 (TRB_MAX_DATA_SIZE * 4),
2856 &mhi_ctx->cache_dma_handle, GFP_KERNEL);
2857 if (!mhi_ctx->dma_cache)
2858 return -ENOMEM;
2859
2860 mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev,
2861 (TRB_MAX_DATA_SIZE * 4),
2862 &mhi_ctx->read_dma_handle,
2863 GFP_KERNEL);
2864 if (!mhi_ctx->read_handle)
2865 return -ENOMEM;
2866
2867 mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev,
2868 (TRB_MAX_DATA_SIZE * 24),
2869 &mhi_ctx->write_dma_handle,
2870 GFP_KERNEL);
2871 if (!mhi_ctx->write_handle)
2872 return -ENOMEM;
2873
2874 rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version);
2875 if (rc) {
2876 pr_err("Failed to update the MHI version\n");
2877 return rc;
2878 }
2879
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002880 mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
2881 if (!mhi_ctx->phandle) {
2882 pr_err("PCIe driver get handle failed.\n");
2883 return -EINVAL;
2884 }
2885
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08002886 mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
2887 EP_PCIE_EVENT_PM_D3_COLD |
2888 EP_PCIE_EVENT_PM_D0 |
2889 EP_PCIE_EVENT_PM_RST_DEAST |
2890 EP_PCIE_EVENT_MHI_A7 |
2891 EP_PCIE_EVENT_LINKDOWN;
2892 mhi_ctx->event_reg.user = mhi_ctx;
2893 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
2894 mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
2895
2896 rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
2897 if (rc) {
2898 pr_err("Failed to register for events from PCIe\n");
2899 return rc;
2900 }
2901
2902 pr_err("Registering with IPA\n");
2903
2904 rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
2905 if (rc < 0) {
2906 if (rc == -EEXIST) {
2907 mhi_ring_init_cb(mhi_ctx);
2908 } else {
2909 pr_err("Error calling IPA cb with %d\n", rc);
2910 return rc;
2911 }
2912 }
2913
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08002914 /* Invoke MHI SM when device is in RESET state */
2915 rc = mhi_dev_sm_init(mhi_ctx);
2916 if (rc) {
2917 pr_err("%s: Error during SM init\n", __func__);
2918 return rc;
2919 }
2920
2921 /* set the env before setting the ready bit */
2922 rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE);
2923 if (rc) {
2924 pr_err("%s: env setting failed\n", __func__);
2925 return rc;
2926 }
2927
2928 /* All set, notify the host */
	rc = mhi_dev_sm_set_ready();
	if (rc) {
		pr_err("%s: unable to set ready bit\n", __func__);
		return rc;
	}
2930
2931 if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) {
2932 rc = devm_request_irq(&pdev->dev, mhi_ctx->mhi_irq, mhi_dev_isr,
2933 IRQF_TRIGGER_HIGH, "mhi_isr", mhi_ctx);
2934 if (rc) {
2935 dev_err(&pdev->dev, "request mhi irq failed %d\n", rc);
2936 return -EINVAL;
2937 }
2938
2939 disable_irq(mhi_ctx->mhi_irq);
2940 }
2941
2942 return 0;
2943}
2944
2945static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify)
2946{
2947 if (!notify || !notify->user) {
2948 pr_err("Null argument for notify\n");
2949 return;
2950 }
2951
2952 mhi_ctx = notify->user;
2953 mhi_dev_pcie_notify_event = notify->options;
2954 mhi_log(MHI_MSG_INFO,
2955 "PCIe event=0x%x\n", notify->options);
2956 queue_work(mhi_ctx->pcie_event_wq, &mhi_ctx->pcie_event);
2957}
2958
2959static void mhi_dev_pcie_handle_event(struct work_struct *work)
2960{
2961 struct mhi_dev *mhi_ctx = container_of(work, struct mhi_dev,
2962 pcie_event);
2963 int rc = 0;
2964
2965 if (mhi_dev_pcie_notify_event == MHI_INIT) {
2966 rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx);
2967 if (rc) {
2968 pr_err("Error during MHI device initialization\n");
2969 return;
2970 }
2971 } else if (mhi_dev_pcie_notify_event == MHI_REINIT) {
2972 rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx);
2973 if (rc) {
2974 pr_err("Error during MHI device re-initialization\n");
2975 return;
2976 }
2977 }
2978}
2979
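/*
 * Probe: read the DT configuration, create the IPC log and the UCI
 * interface, then either run the full MMIO/MHI init immediately when
 * the PCIe link is already up (phandle available) or defer it to the
 * EP_PCIE_EVENT_LINKUP callback via mhi_dev_resume_init_with_link_up().
 */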
2980static int mhi_dev_probe(struct platform_device *pdev)
2981{
2982 int rc = 0;
2983
2984 if (pdev->dev.of_node) {
2985 rc = get_device_tree_data(pdev);
2986 if (rc) {
2987 pr_err("Error reading MHI Dev DT\n");
2988 return rc;
2989 }
2990 mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
2991 "mhi", 0);
2992 if (mhi_ipc_log == NULL) {
2993 dev_err(&pdev->dev,
2994 "Failed to create IPC logging context\n");
2995 }
Siva Kumar Akkireddi8fd5e6c2018-05-21 14:53:10 +05302996 /*
2997 * The below list and mutex should be initialized
2998 * before calling mhi_uci_init to avoid crash in
2999 * mhi_register_state_cb when accessing these.
3000 */
3001 INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
3002 mutex_init(&mhi_ctx->mhi_lock);
3003
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003004 mhi_uci_init();
Siddartha Mohanadossba314f22018-03-19 15:43:50 -07003005 mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
3006 MHI_STATE_CONFIGURED);
Siddartha Mohanadosse0954332018-01-15 14:03:03 -08003007 }
3008
3009 INIT_WORK(&mhi_ctx->pcie_event, mhi_dev_pcie_handle_event);
3010 mhi_ctx->pcie_event_wq = alloc_workqueue("mhi_dev_pcie_event_wq",
3011 WQ_HIGHPRI, 0);
3012 if (!mhi_ctx->pcie_event_wq) {
3013 pr_err("no memory\n");
3014 rc = -ENOMEM;
3015 return rc;
3016 }
3017
3018 mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
3019 if (mhi_ctx->phandle) {
3020 /* PCIe link is already up */
3021 rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx);
3022 if (rc) {
3023 pr_err("Error during MHI device initialization\n");
3024 return rc;
3025 }
3026 } else {
3027 pr_debug("Register a PCIe callback\n");
3028 mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP;
3029 mhi_ctx->event_reg.user = mhi_ctx;
3030 mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
3031 mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up;
3032 mhi_ctx->event_reg.options = MHI_INIT;
3033
3034 rc = ep_pcie_register_event(mhi_ctx->phandle,
3035 &mhi_ctx->event_reg);
3036 if (rc) {
3037 pr_err("Failed to register for events from PCIe\n");
3038 return rc;
3039 }
3040 }
3041
Siddartha Mohanadoss603f7652017-01-26 15:59:41 -08003042 return 0;
3043}
3044
3045static int mhi_dev_remove(struct platform_device *pdev)
3046{
3047 platform_set_drvdata(pdev, NULL);
3048
3049 return 0;
3050}
3051
3052static const struct of_device_id mhi_dev_match_table[] = {
3053 { .compatible = "qcom,msm-mhi-dev" },
3054 {}
3055};
3056
3057static struct platform_driver mhi_dev_driver = {
3058 .driver = {
3059 .name = "qcom,msm-mhi-dev",
3060 .of_match_table = mhi_dev_match_table,
3061 },
3062 .probe = mhi_dev_probe,
3063 .remove = mhi_dev_remove,
3064};
3065
3066module_param(mhi_msg_lvl, uint, 0644);
3067module_param(mhi_ipc_msg_lvl, uint, 0644);
3068
3069MODULE_PARM_DESC(mhi_msg_lvl, "mhi msg lvl");
3070MODULE_PARM_DESC(mhi_ipc_msg_lvl, "mhi ipc msg lvl");
3071
3072static int __init mhi_dev_init(void)
3073{
3074 return platform_driver_register(&mhi_dev_driver);
3075}
3076module_init(mhi_dev_init);
3077
3078static void __exit mhi_dev_exit(void)
3079{
3080 platform_driver_unregister(&mhi_dev_driver);
3081}
3082module_exit(mhi_dev_exit);
3083
3084MODULE_DESCRIPTION("MHI device driver");
3085MODULE_LICENSE("GPL v2");