blob: 815da18b5fc2c780f4c01699fc7168779aef0ea9 [file] [log] [blame]
Sayali Lokhande25cadc32016-11-30 10:01:59 +05301/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/delay.h>
14#include <linux/highmem.h>
15#include <linux/io.h>
16#include <linux/module.h>
17#include <linux/dma-mapping.h>
18#include <linux/slab.h>
19#include <linux/scatterlist.h>
20#include <linux/platform_device.h>
21#include <linux/blkdev.h>
22
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/card.h>
Konstantin Dorfman4d40cf22015-06-11 11:41:53 +030026#include <linux/pm_runtime.h>
Gilad Broner44445992015-09-29 16:05:39 +030027#include <linux/workqueue.h>
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -070028
29#include "cmdq_hci.h"
Gilad Broner44445992015-09-29 16:05:39 +030030#include "sdhci.h"
31#include "sdhci-msm.h"
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -070032
33#define DCMD_SLOT 31
34#define NUM_SLOTS 32
35
Veerabhadrarao Badigantiaa2b2d82017-02-20 13:51:13 +053036/* 10 sec */
37#define HALT_TIMEOUT_MS 10000
Asutosh Dasaa1e1c72015-05-21 17:22:10 +053038
Ritesh Harjani903f3452015-12-14 10:07:33 +053039static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
Ritesh Harjani6217c042015-10-01 20:34:42 +053040static int cmdq_halt(struct mmc_host *mmc, bool halt);
Ritesh Harjanib0280c22015-10-27 11:51:25 +053041
/*
 * Runtime PM wrappers for the CMDQ host's parent (controller) device.
 *
 * CONFIG_PM_RUNTIME was folded into CONFIG_PM in kernel v3.19 and later
 * removed entirely; guarding on CONFIG_PM_RUNTIME alone silently compiles
 * these wrappers into no-ops on such kernels, defeating runtime PM.
 * Accept either symbol so the real implementations are used whenever
 * power management is configured.
 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM)
/* Take a runtime PM reference on the controller device (may resume it). */
static int cmdq_runtime_pm_get(struct cmdq_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

/* Drop the reference; autosuspend keeps the controller up briefly. */
static int cmdq_runtime_pm_put(struct cmdq_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}
#else
static inline int cmdq_runtime_pm_get(struct cmdq_host *host)
{
	return 0;
}
static inline int cmdq_runtime_pm_put(struct cmdq_host *host)
{
	return 0;
}
#endif
Asutosh Das02e30862015-05-20 16:52:04 +053062static inline struct mmc_request *get_req_by_tag(struct cmdq_host *cq_host,
63 unsigned int tag)
64{
65 return cq_host->mrq_slot[tag];
66}
67
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -070068static inline u8 *get_desc(struct cmdq_host *cq_host, u8 tag)
69{
70 return cq_host->desc_base + (tag * cq_host->slot_sz);
71}
72
73static inline u8 *get_link_desc(struct cmdq_host *cq_host, u8 tag)
74{
75 u8 *desc = get_desc(cq_host, tag);
76
77 return desc + cq_host->task_desc_len;
78}
79
80static inline dma_addr_t get_trans_desc_dma(struct cmdq_host *cq_host, u8 tag)
81{
82 return cq_host->trans_desc_dma_base +
83 (cq_host->mmc->max_segs * tag *
84 cq_host->trans_desc_len);
85}
86
87static inline u8 *get_trans_desc(struct cmdq_host *cq_host, u8 tag)
88{
89 return cq_host->trans_desc_base +
90 (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
91}
92
93static void setup_trans_desc(struct cmdq_host *cq_host, u8 tag)
94{
95 u8 *link_temp;
96 dma_addr_t trans_temp;
97
98 link_temp = get_link_desc(cq_host, tag);
99 trans_temp = get_trans_desc_dma(cq_host, tag);
100
101 memset(link_temp, 0, cq_host->link_desc_len);
102 if (cq_host->link_desc_len > 8)
103 *(link_temp + 8) = 0;
104
105 if (tag == DCMD_SLOT) {
106 *link_temp = VALID(0) | ACT(0) | END(1);
107 return;
108 }
109
110 *link_temp = VALID(1) | ACT(0x6) | END(0);
111
112 if (cq_host->dma64) {
113 __le64 *data_addr = (__le64 __force *)(link_temp + 4);
114 data_addr[0] = cpu_to_le64(trans_temp);
115 } else {
116 __le32 *data_addr = (__le32 __force *)(link_temp + 4);
117 data_addr[0] = cpu_to_le32(trans_temp);
118 }
119}
120
Ritesh Harjanib0280c22015-10-27 11:51:25 +0530121static void cmdq_set_halt_irq(struct cmdq_host *cq_host, bool enable)
122{
123 u32 ier;
124
125 ier = cmdq_readl(cq_host, CQISTE);
126 if (enable) {
127 cmdq_writel(cq_host, ier | HALT, CQISTE);
128 cmdq_writel(cq_host, ier | HALT, CQISGE);
129 } else {
130 cmdq_writel(cq_host, ier & ~HALT, CQISTE);
131 cmdq_writel(cq_host, ier & ~HALT, CQISGE);
132 }
133}
134
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700135static void cmdq_clear_set_irqs(struct cmdq_host *cq_host, u32 clear, u32 set)
136{
137 u32 ier;
138
139 ier = cmdq_readl(cq_host, CQISTE);
140 ier &= ~clear;
141 ier |= set;
142 cmdq_writel(cq_host, ier, CQISTE);
143 cmdq_writel(cq_host, ier, CQISGE);
144 /* ensure the writes are done */
145 mb();
146}
147
Veerabhadrarao Badigantib6782992016-12-11 20:38:20 +0530148static int cmdq_clear_task_poll(struct cmdq_host *cq_host, unsigned int tag)
149{
150 int retries = 100;
151
152 cmdq_clear_set_irqs(cq_host, CQIS_TCL, 0);
153 cmdq_writel(cq_host, 1<<tag, CQTCLR);
154 while (retries) {
155 /*
156 * Task Clear register and doorbell,
157 * both should indicate that task is cleared
158 */
159 if ((cmdq_readl(cq_host, CQTCLR) & 1<<tag) ||
160 (cmdq_readl(cq_host, CQTDBR) & 1<<tag)) {
161 udelay(5);
162 retries--;
163 continue;
164 } else
165 break;
166 }
167
168 cmdq_clear_set_irqs(cq_host, 0, CQIS_TCL);
169 return retries ? 0 : -ETIMEDOUT;
170}
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700171
172#define DRV_NAME "cmdq-host"
173
Venkat Gopalakrishnane77c64d2015-09-28 18:53:18 -0700174static void cmdq_dump_task_history(struct cmdq_host *cq_host)
175{
176 int i;
177
178 if (likely(!cq_host->mmc->cmdq_thist_enabled))
179 return;
180
181 if (!cq_host->thist) {
182 pr_err("%s: %s: CMDQ task history buffer not allocated\n",
183 mmc_hostname(cq_host->mmc), __func__);
184 return;
185 }
186
187 pr_err("---- Circular Task History ----\n");
188 pr_err(DRV_NAME ": Last entry index: %d", cq_host->thist_idx - 1);
189
190 for (i = 0; i < cq_host->num_slots; i++) {
191 pr_err(DRV_NAME ": [%02d]%s Task: 0x%08x | Args: 0x%08x\n", i,
192 (cq_host->thist[i].is_dcmd) ? "DCMD" : "DATA",
193 lower_32_bits(cq_host->thist[i].task),
194 upper_32_bits(cq_host->thist[i].task));
195 }
196 pr_err("-------------------------\n");
197}
198
Ritesh Harjani6217c042015-10-01 20:34:42 +0530199static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
200{
201 struct mmc_host *mmc = cq_host->mmc;
202 dma_addr_t desc_dma;
203 int tag = 0;
204 unsigned long data_active_reqs =
205 mmc->cmdq_ctx.data_active_reqs;
206 unsigned long desc_size =
207 (cq_host->mmc->max_segs * cq_host->trans_desc_len);
208
209 for_each_set_bit(tag, &data_active_reqs, cq_host->num_slots) {
210 desc_dma = get_trans_desc_dma(cq_host, tag);
211 pr_err("%s: %s: tag = %d, trans_dma(phys) = %pad, trans_desc(virt) = 0x%p\n",
212 mmc_hostname(mmc), __func__, tag,
213 &desc_dma, get_trans_desc(cq_host, tag));
214 print_hex_dump(KERN_ERR, "cmdq-adma:", DUMP_PREFIX_ADDRESS,
215 32, 8, get_trans_desc(cq_host, tag),
216 (desc_size), false);
217 }
218}
219
/*
 * Dump the full CQE register file to the kernel log and the MMC trace
 * buffer.  Called on error/timeout paths; finishes with the task history
 * and the vendor-specific register dump hook.
 */
static void cmdq_dumpregs(struct cmdq_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	int offset = 0;

	/* SDHC v5.0+ shifted the vendor config register (ICE 3.0 regs added) */
	if (cq_host->offset_changed)
		offset = CQ_V5_VENDOR_CFG;

	MMC_TRACE(mmc,
	"%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
	__func__, cmdq_readl(cq_host, CQCTL), cmdq_readl(cq_host, CQIS),
	cmdq_readl(cq_host, CQISTE), cmdq_readl(cq_host, CQISGE),
	cmdq_readl(cq_host, CQTDBR), cmdq_readl(cq_host, CQTCN),
	cmdq_readl(cq_host, CQDQS), cmdq_readl(cq_host, CQDPT),
	cmdq_readl(cq_host, CQTERRI), cmdq_readl(cq_host, CQCRI),
	cmdq_readl(cq_host, CQCRA), cmdq_readl(cq_host, CQCRDCT));
	pr_err(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
		mmc_hostname(mmc));

	pr_err(DRV_NAME ": Caps: 0x%08x | Version: 0x%08x\n",
		cmdq_readl(cq_host, CQCAP),
		cmdq_readl(cq_host, CQVER));
	pr_err(DRV_NAME ": Queing config: 0x%08x | Queue Ctrl: 0x%08x\n",
		cmdq_readl(cq_host, CQCFG),
		cmdq_readl(cq_host, CQCTL));
	pr_err(DRV_NAME ": Int stat: 0x%08x | Int enab: 0x%08x\n",
		cmdq_readl(cq_host, CQIS),
		cmdq_readl(cq_host, CQISTE));
	pr_err(DRV_NAME ": Int sig: 0x%08x | Int Coal: 0x%08x\n",
		cmdq_readl(cq_host, CQISGE),
		cmdq_readl(cq_host, CQIC));
	pr_err(DRV_NAME ": TDL base: 0x%08x | TDL up32: 0x%08x\n",
		cmdq_readl(cq_host, CQTDLBA),
		cmdq_readl(cq_host, CQTDLBAU));
	pr_err(DRV_NAME ": Doorbell: 0x%08x | Comp Notif: 0x%08x\n",
		cmdq_readl(cq_host, CQTDBR),
		cmdq_readl(cq_host, CQTCN));
	pr_err(DRV_NAME ": Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		cmdq_readl(cq_host, CQDQS),
		cmdq_readl(cq_host, CQDPT));
	pr_err(DRV_NAME ": Task clr: 0x%08x | Send stat 1: 0x%08x\n",
		cmdq_readl(cq_host, CQTCLR),
		cmdq_readl(cq_host, CQSSC1));
	pr_err(DRV_NAME ": Send stat 2: 0x%08x | DCMD resp: 0x%08x\n",
		cmdq_readl(cq_host, CQSSC2),
		cmdq_readl(cq_host, CQCRDCT));
	pr_err(DRV_NAME ": Resp err mask: 0x%08x | Task err: 0x%08x\n",
		cmdq_readl(cq_host, CQRMEM),
		cmdq_readl(cq_host, CQTERRI));
	pr_err(DRV_NAME ": Resp idx 0x%08x | Resp arg: 0x%08x\n",
		cmdq_readl(cq_host, CQCRI),
		cmdq_readl(cq_host, CQCRA));
	pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
		cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
	pr_err(DRV_NAME ": ===========================================\n");

	cmdq_dump_task_history(cq_host);
	if (cq_host->ops->dump_vendor_regs)
		cq_host->ops->dump_vendor_regs(mmc);
}
280
281/**
282 * The allocated descriptor table for task, link & transfer descritors
283 * looks like:
284 * |----------|
285 * |task desc | |->|----------|
286 * |----------| | |trans desc|
287 * |link desc-|->| |----------|
288 * |----------| .
289 * . .
290 * no. of slots max-segs
291 * . |----------|
292 * |----------|
293 * The idea here is to create the [task+trans] table and mark & point the
294 * link desc to the transfer desc table on a per slot basis.
295 */
296static int cmdq_host_alloc_tdl(struct cmdq_host *cq_host)
297{
298
299 size_t desc_size;
300 size_t data_size;
301 int i = 0;
302
303 /* task descriptor can be 64/128 bit irrespective of arch */
304 if (cq_host->caps & CMDQ_TASK_DESC_SZ_128) {
305 cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) |
306 CQ_TASK_DESC_SZ, CQCFG);
307 cq_host->task_desc_len = 16;
308 } else {
309 cq_host->task_desc_len = 8;
310 }
311
312 /*
313 * 96 bits length of transfer desc instead of 128 bits which means
314 * ADMA would expect next valid descriptor at the 96th bit
315 * or 128th bit
316 */
317 if (cq_host->dma64) {
318 if (cq_host->quirks & CMDQ_QUIRK_SHORT_TXFR_DESC_SZ)
319 cq_host->trans_desc_len = 12;
320 else
321 cq_host->trans_desc_len = 16;
322 cq_host->link_desc_len = 16;
323 } else {
324 cq_host->trans_desc_len = 8;
325 cq_host->link_desc_len = 8;
326 }
327
328 /* total size of a slot: 1 task & 1 transfer (link) */
329 cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
330
331 desc_size = cq_host->slot_sz * cq_host->num_slots;
332
333 data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
334 (cq_host->num_slots - 1);
335
336 pr_info("%s: desc_size: %d data_sz: %d slot-sz: %d\n", __func__,
337 (int)desc_size, (int)data_size, cq_host->slot_sz);
338
339 /*
340 * allocate a dma-mapped chunk of memory for the descriptors
341 * allocate a dma-mapped chunk of memory for link descriptors
342 * setup each link-desc memory offset per slot-number to
343 * the descriptor table.
344 */
345 cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
346 desc_size,
347 &cq_host->desc_dma_base,
348 GFP_KERNEL);
349 cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
350 data_size,
351 &cq_host->trans_desc_dma_base,
352 GFP_KERNEL);
Venkat Gopalakrishnane77c64d2015-09-28 18:53:18 -0700353 cq_host->thist = devm_kzalloc(mmc_dev(cq_host->mmc),
354 (sizeof(*cq_host->thist) *
355 cq_host->num_slots),
356 GFP_KERNEL);
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700357 if (!cq_host->desc_base || !cq_host->trans_desc_base)
358 return -ENOMEM;
359
Vijay Viswanath9f5aaf92017-12-11 10:50:29 +0530360 pr_debug("desc-base: 0x%pK trans-base: 0x%pK\n desc_dma 0x%llx trans_dma: 0x%llx\n",
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700361 cq_host->desc_base, cq_host->trans_desc_base,
362 (unsigned long long)cq_host->desc_dma_base,
363 (unsigned long long) cq_host->trans_desc_dma_base);
364
365 for (; i < (cq_host->num_slots); i++)
366 setup_trans_desc(cq_host, i);
367
368 return 0;
369}
370
/*
 * Bring up the CQE: program the config register, allocate the descriptor
 * lists on first use, set descriptor-list base addresses, interrupts,
 * RCA and QSR interval, then invoke the vendor setup hooks.
 *
 * A runtime PM reference is held across the whole sequence.  Returns 0
 * on success, -EINVAL when the host/card is not CMDQ-capable, or the
 * descriptor-allocation error.
 */
static int cmdq_enable(struct mmc_host *mmc)
{
	int err = 0;
	u32 cqcfg;
	u32 cqcap = 0;
	bool dcmd_enable;
	struct cmdq_host *cq_host = mmc_cmdq_private(mmc);

	if (!cq_host || !mmc->card || !mmc_card_cmdq(mmc->card)) {
		err = -EINVAL;
		goto out;
	}

	/* already up: nothing to do (idempotent) */
	if (cq_host->enabled)
		goto out;

	cmdq_runtime_pm_get(cq_host);
	cqcfg = cmdq_readl(cq_host, CQCFG);
	/* bit 0 of CQCFG is the CQE enable bit */
	if (cqcfg & 0x1) {
		pr_info("%s: %s: cq_host is already enabled\n",
				mmc_hostname(mmc), __func__);
		WARN_ON(1);
		goto pm_ref_count;
	}

	if (cq_host->quirks & CMDQ_QUIRK_NO_DCMD)
		dcmd_enable = false;
	else
		dcmd_enable = true;

	cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
			(dcmd_enable ? CQ_DCMD : 0));

	cqcap = cmdq_readl(cq_host, CQCAP);
	if (cqcap & CQCAP_CS) {
		/*
		 * In case host controller supports cryptographic operations
		 * then, it uses 128bit task descriptor. Upper 64 bits of task
		 * descriptor would be used to pass crypto specific informaton.
		 */
		cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
				CMDQ_TASK_DESC_SZ_128;
		cqcfg |= CQ_ICE_ENABLE;
		/*
		 * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
		 * in CQ register space, due to which few CQ registers are
		 * shifted. Set offset_changed boolean to use updated address.
		 */
		cq_host->offset_changed = true;
	}

	/* program the config first, then flip the enable bit */
	cmdq_writel(cq_host, cqcfg, CQCFG);
	/* enable CQ_HOST */
	cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
		    CQCFG);

	/* first enable: descriptor memory not allocated yet */
	if (!cq_host->desc_base ||
			!cq_host->trans_desc_base) {
		err = cmdq_host_alloc_tdl(cq_host);
		if (err)
			goto pm_ref_count;
	}

	cmdq_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), CQTDLBA);
	cmdq_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), CQTDLBAU);

	/*
	 * disable all vendor interrupts
	 * enable CMDQ interrupts
	 * enable the vendor error interrupts
	 */
	if (cq_host->ops->clear_set_irqs)
		cq_host->ops->clear_set_irqs(mmc, true);

	cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);

	/* cq_host would use this rca to address the card */
	cmdq_writel(cq_host, mmc->card->rca, CQSSC2);

	/* send QSR at lesser intervals than the default */
	cmdq_writel(cq_host, SEND_QSR_INTERVAL, CQSSC1);

	/* enable bkops exception indication */
	if (mmc_card_configured_manual_bkops(mmc->card) &&
	    !mmc_card_configured_auto_bkops(mmc->card))
		cmdq_writel(cq_host, cmdq_readl(cq_host, CQRMEM) | CQ_EXCEPTION,
				CQRMEM);

	/* ensure the writes are done before enabling CQE */
	mb();

	cq_host->enabled = true;
	mmc_host_clr_cq_disable(mmc);

	/* optional vendor hooks for transfer params / blk size / timeouts */
	if (cq_host->ops->set_transfer_params)
		cq_host->ops->set_transfer_params(mmc);

	if (cq_host->ops->set_block_size)
		cq_host->ops->set_block_size(cq_host->mmc);

	if (cq_host->ops->set_data_timeout)
		cq_host->ops->set_data_timeout(mmc, 0xf);

	if (cq_host->ops->clear_set_dumpregs)
		cq_host->ops->clear_set_dumpregs(mmc, 1);

	if (cq_host->ops->enhanced_strobe_mask)
		cq_host->ops->enhanced_strobe_mask(mmc, true);

pm_ref_count:
	cmdq_runtime_pm_put(cq_host);
out:
	MMC_TRACE(mmc, "%s: CQ enabled err: %d\n", __func__, err);
	return err;
}
486
Ritesh Harjani903f3452015-12-14 10:07:33 +0530487static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft)
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700488{
489 struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
490
491 if (soft) {
492 cmdq_writel(cq_host, cmdq_readl(
493 cq_host, CQCFG) & ~(CQ_ENABLE),
494 CQCFG);
495 }
Ritesh Harjani6b2ea572015-07-15 13:23:05 +0530496 if (cq_host->ops->enhanced_strobe_mask)
497 cq_host->ops->enhanced_strobe_mask(mmc, false);
498
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700499 cq_host->enabled = false;
Ritesh Harjanib0280c22015-10-27 11:51:25 +0530500 mmc_host_set_cq_disable(mmc);
Sayali Lokhande25cadc32016-11-30 10:01:59 +0530501 MMC_TRACE(mmc, "%s: CQ disabled\n", __func__);
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700502}
503
Ritesh Harjani903f3452015-12-14 10:07:33 +0530504static void cmdq_disable(struct mmc_host *mmc, bool soft)
505{
506 struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
507
508 cmdq_runtime_pm_get(cq_host);
509 cmdq_disable_nosync(mmc, soft);
510 cmdq_runtime_pm_put(cq_host);
511}
512
/*
 * Reset the CQE controller while preserving its programmed state: save
 * the config, descriptor-list base and RCA registers, disable the queue,
 * reset via the vendor hook, then restore the saved state and re-enable.
 * A failed vendor reset is unrecoverable and BUG()s.
 */
static void cmdq_reset(struct mmc_host *mmc, bool soft)
{
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	unsigned int cqcfg;
	unsigned int tdlba;
	unsigned int tdlbau;
	unsigned int rca;
	int ret;

	cmdq_runtime_pm_get(cq_host);
	/* snapshot state to restore after the reset */
	cqcfg = cmdq_readl(cq_host, CQCFG);
	tdlba = cmdq_readl(cq_host, CQTDLBA);
	tdlbau = cmdq_readl(cq_host, CQTDLBAU);
	rca = cmdq_readl(cq_host, CQSSC2);

	cmdq_disable(mmc, true);

	if (cq_host->ops->reset) {
		ret = cq_host->ops->reset(mmc);
		if (ret) {
			/* controller can no longer be trusted */
			pr_crit("%s: reset CMDQ controller: failed\n",
				mmc_hostname(mmc));
			BUG();
		}
	}

	cmdq_writel(cq_host, tdlba, CQTDLBA);
	cmdq_writel(cq_host, tdlbau, CQTDLBAU);

	if (cq_host->ops->clear_set_irqs)
		cq_host->ops->clear_set_irqs(mmc, true);

	cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);

	/* cq_host would use this rca to address the card */
	cmdq_writel(cq_host, rca, CQSSC2);

	/* ensure the writes are done before enabling CQE */
	mb();

	/* restoring the saved CQCFG re-enables the queue if it was enabled */
	cmdq_writel(cq_host, cqcfg, CQCFG);
	cmdq_runtime_pm_put(cq_host);
	cq_host->enabled = true;
	mmc_host_clr_cq_disable(mmc);
}
558
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700559static void cmdq_prep_task_desc(struct mmc_request *mrq,
560 u64 *data, bool intr, bool qbr)
561{
562 struct mmc_cmdq_req *cmdq_req = mrq->cmdq_req;
563 u32 req_flags = cmdq_req->cmdq_req_flags;
564
565 pr_debug("%s: %s: data-tag: 0x%08x - dir: %d - prio: %d - cnt: 0x%08x - addr: 0x%llx\n",
566 mmc_hostname(mrq->host), __func__,
567 !!(req_flags & DAT_TAG), !!(req_flags & DIR),
568 !!(req_flags & PRIO), cmdq_req->data.blocks,
569 (u64)mrq->cmdq_req->blk_addr);
570
571 *data = VALID(1) |
572 END(1) |
573 INT(intr) |
574 ACT(0x5) |
575 FORCED_PROG(!!(req_flags & FORCED_PRG)) |
576 CONTEXT(mrq->cmdq_req->ctx_id) |
577 DATA_TAG(!!(req_flags & DAT_TAG)) |
578 DATA_DIR(!!(req_flags & DIR)) |
579 PRIORITY(!!(req_flags & PRIO)) |
580 QBAR(qbr) |
581 REL_WRITE(!!(req_flags & REL_WR)) |
582 BLK_COUNT(mrq->cmdq_req->data.blocks) |
583 BLK_ADDR((u64)mrq->cmdq_req->blk_addr);
Sayali Lokhande25cadc32016-11-30 10:01:59 +0530584
585 MMC_TRACE(mrq->host,
586 "%s: Task: 0x%08x | Args: 0x%08x | cnt: 0x%08x\n", __func__,
587 lower_32_bits(*data),
588 upper_32_bits(*data),
589 mrq->cmdq_req->data.blocks);
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700590}
591
592static int cmdq_dma_map(struct mmc_host *host, struct mmc_request *mrq)
593{
594 int sg_count;
595 struct mmc_data *data = mrq->data;
596
597 if (!data)
598 return -EINVAL;
599
600 sg_count = dma_map_sg(mmc_dev(host), data->sg,
601 data->sg_len,
602 (data->flags & MMC_DATA_WRITE) ?
603 DMA_TO_DEVICE : DMA_FROM_DEVICE);
604 if (!sg_count) {
605 pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
606 return -ENOMEM;
607 }
608
609 return sg_count;
610}
611
Sahitya Tummala78a68e52015-09-30 15:55:41 +0530612static void cmdq_set_tran_desc(u8 *desc, dma_addr_t addr, int len,
613 bool end, bool is_dma64)
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700614{
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700615 __le32 *attr = (__le32 __force *)desc;
616
617 *attr = (VALID(1) |
618 END(end ? 1 : 0) |
619 INT(0) |
620 ACT(0x4) |
621 DAT_LENGTH(len));
622
Sahitya Tummala78a68e52015-09-30 15:55:41 +0530623 if (is_dma64) {
624 __le64 *dataddr = (__le64 __force *)(desc + 4);
625
626 dataddr[0] = cpu_to_le64(addr);
627 } else {
628 __le32 *dataddr = (__le32 __force *)(desc + 4);
629
630 dataddr[0] = cpu_to_le32(addr);
631 }
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700632}
633
634static int cmdq_prep_tran_desc(struct mmc_request *mrq,
635 struct cmdq_host *cq_host, int tag)
636{
637 struct mmc_data *data = mrq->data;
638 int i, sg_count, len;
639 bool end = false;
640 dma_addr_t addr;
641 u8 *desc;
642 struct scatterlist *sg;
643
644 sg_count = cmdq_dma_map(mrq->host, mrq);
645 if (sg_count < 0) {
646 pr_err("%s: %s: unable to map sg lists, %d\n",
647 mmc_hostname(mrq->host), __func__, sg_count);
648 return sg_count;
649 }
650
651 desc = get_trans_desc(cq_host, tag);
652 memset(desc, 0, cq_host->trans_desc_len * cq_host->mmc->max_segs);
653
654 for_each_sg(data->sg, sg, sg_count, i) {
655 addr = sg_dma_address(sg);
656 len = sg_dma_len(sg);
657
658 if ((i+1) == sg_count)
659 end = true;
Sahitya Tummala78a68e52015-09-30 15:55:41 +0530660 cmdq_set_tran_desc(desc, addr, len, end, cq_host->dma64);
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700661 desc += cq_host->trans_desc_len;
662 }
663
664 pr_debug("%s: req: 0x%p tag: %d calc_trans_des: 0x%p sg-cnt: %d\n",
665 __func__, mrq->req, tag, desc, sg_count);
666
667 return 0;
668}
669
Venkat Gopalakrishnane77c64d2015-09-28 18:53:18 -0700670static void cmdq_log_task_desc_history(struct cmdq_host *cq_host, u64 task,
671 bool is_dcmd)
672{
673 if (likely(!cq_host->mmc->cmdq_thist_enabled))
674 return;
675
676 if (!cq_host->thist) {
677 pr_err("%s: %s: CMDQ task history buffer not allocated\n",
678 mmc_hostname(cq_host->mmc), __func__);
679 return;
680 }
681
682 if (cq_host->thist_idx >= cq_host->num_slots)
683 cq_host->thist_idx = 0;
684
685 cq_host->thist[cq_host->thist_idx].is_dcmd = is_dcmd;
686 memcpy(&cq_host->thist[cq_host->thist_idx++].task,
687 &task, cq_host->task_desc_len);
688}
689
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700690static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
691 struct mmc_request *mrq)
692{
693 u64 *task_desc = NULL;
694 u64 data = 0;
695 u8 resp_type;
696 u8 *desc;
697 __le64 *dataddr;
698 struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
699 u8 timing;
700
701 if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
702 resp_type = 0x0;
703 timing = 0x1;
704 } else {
Sahitya Tummala72bd8402015-05-29 13:27:38 +0530705 if (mrq->cmd->flags & MMC_RSP_BUSY) {
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700706 resp_type = 0x3;
707 timing = 0x0;
708 } else {
709 resp_type = 0x2;
710 timing = 0x1;
711 }
712 }
713
714 task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
715 memset(task_desc, 0, cq_host->task_desc_len);
716 data |= (VALID(1) |
717 END(1) |
718 INT(1) |
719 QBAR(1) |
720 ACT(0x5) |
721 CMD_INDEX(mrq->cmd->opcode) |
722 CMD_TIMING(timing) | RESP_TYPE(resp_type));
723 *task_desc |= data;
724 desc = (u8 *)task_desc;
725 pr_debug("cmdq: dcmd: cmd: %d timing: %d resp: %d\n",
726 mrq->cmd->opcode, timing, resp_type);
727 dataddr = (__le64 __force *)(desc + 4);
728 dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
Venkat Gopalakrishnane77c64d2015-09-28 18:53:18 -0700729 cmdq_log_task_desc_history(cq_host, *task_desc, true);
Sayali Lokhande25cadc32016-11-30 10:01:59 +0530730 MMC_TRACE(mrq->host,
731 "%s: DCMD: Task: 0x%08x | Args: 0x%08x\n",
732 __func__,
733 lower_32_bits(*task_desc),
734 upper_32_bits(*task_desc));
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -0700735}
736
Veerabhadrarao Badiganti87ddf1b2016-12-11 20:16:58 +0530737static inline
738void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc,
739 u64 ice_ctx)
740{
741 u64 *ice_desc = NULL;
742
743 if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) {
744 /*
745 * Get the address of ice context for the given task descriptor.
746 * ice context is present in the upper 64bits of task descriptor
747 * ice_conext_base_address = task_desc + 8-bytes
748 */
749 ice_desc = (__le64 *)((u8 *)task_desc +
750 CQ_TASK_DESC_TASK_PARAMS_SIZE);
751 memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE);
752
753 /*
754 * Assign upper 64bits data of task descritor with ice context
755 */
756 if (ice_ctx)
757 *ice_desc = cpu_to_le64(ice_ctx);
758 }
759}
760
Gilad Broner44445992015-09-29 16:05:39 +0300761static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
762{
763 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
764 struct sdhci_msm_host *msm_host = pltfm_host->priv;
765
766 sdhci_msm_pm_qos_cpu_vote(host,
767 msm_host->pdata->pm_qos_data.cmdq_latency, mrq->req->cpu);
768}
769
770static void cmdq_pm_qos_unvote(struct sdhci_host *host, struct mmc_request *mrq)
771{
772 /* use async as we're inside an atomic context (soft-irq) */
773 sdhci_msm_pm_qos_cpu_unvote(host, mrq->req->cpu, true);
774}
775
/*
 * Issue one request to the CQE.  DCMDs get a dedicated descriptor in the
 * fixed DCMD slot; data requests get crypto configuration (if present),
 * a task descriptor, a transfer-descriptor chain and PM QoS votes.
 * Finally the slot's doorbell bit is rung.
 *
 * On error the crypto configuration is unwound (desc_err/ice_err labels)
 * and the runtime PM reference taken here is dropped.  Returns 0 on
 * success or a negative errno.
 */
static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	u32 tag = mrq->cmdq_req->tag;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	struct sdhci_host *host = mmc_priv(mmc);
	u64 ice_ctx = 0;

	if (!cq_host->enabled) {
		pr_err("%s: CMDQ host not enabled yet !!!\n",
		       mmc_hostname(mmc));
		err = -EINVAL;
		goto out;
	}

	/* reference dropped in cmdq_finish_data()/error paths */
	cmdq_runtime_pm_get(cq_host);

	if (mrq->cmdq_req->cmdq_req_flags & DCMD) {
		cmdq_prep_dcmd_desc(mmc, mrq);
		cq_host->mrq_slot[DCMD_SLOT] = mrq;
		/* DCMD's are always issued on a fixed slot */
		tag = DCMD_SLOT;
		goto ring_doorbell;
	}

	if (cq_host->ops->crypto_cfg) {
		err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx);
		if (err) {
			mmc->err_stats[MMC_ERR_ICE_CFG]++;
			pr_err("%s: failed to configure crypto: err %d tag %d\n",
					mmc_hostname(mmc), err, tag);
			goto ice_err;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, tag);

	cmdq_prep_task_desc(mrq, &data, 1,
			    (mrq->cmdq_req->cmdq_req_flags & QBR));
	*task_desc = cpu_to_le64(data);

	/* upper 64 bits of a 128-bit descriptor carry the ICE context */
	cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx);

	cmdq_log_task_desc_history(cq_host, *task_desc, false);

	err = cmdq_prep_tran_desc(mrq, cq_host, tag);
	if (err) {
		pr_err("%s: %s: failed to setup tx desc: %d\n",
		       mmc_hostname(mmc), __func__, err);
		goto desc_err;
	}

	cq_host->mrq_slot[tag] = mrq;

	/* PM QoS */
	sdhci_msm_pm_qos_irq_vote(host);
	cmdq_pm_qos_vote(host, mrq);
ring_doorbell:
	/* Ensure the task descriptor list is flushed before ringing doorbell */
	wmb();
	/* the slot must be free; a set doorbell bit here is a driver bug */
	if (cmdq_readl(cq_host, CQTDBR) & (1 << tag)) {
		cmdq_dumpregs(cq_host);
		BUG_ON(1);
	}
	MMC_TRACE(mmc, "%s: tag: %d\n", __func__, tag);
	/*
	 * NOTE(review): 1 << tag with tag == 31 (DCMD slot) shifts into the
	 * sign bit of int; 1U << tag would be strictly correct — confirm.
	 */
	cmdq_writel(cq_host, 1 << tag, CQTDBR);
	/* Commit the doorbell write immediately */
	wmb();

	return err;

desc_err:
	/* unwind the per-request crypto configuration */
	if (cq_host->ops->crypto_cfg_end) {
		err = cq_host->ops->crypto_cfg_end(mmc, mrq);
		if (err) {
			pr_err("%s: failed to end ice config: err %d tag %d\n",
					mmc_hostname(mmc), err, tag);
		}
	}
	if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
			cq_host->ops->crypto_cfg_reset)
		cq_host->ops->crypto_cfg_reset(mmc, tag);
ice_err:
	if (err)
		cmdq_runtime_pm_put(cq_host);
out:
	return err;
}
866
/*
 * cmdq_finish_data - finish the request occupying task slot @tag.
 * @mmc: mmc host
 * @tag: task slot of the completed (or errored-out) request
 *
 * Called from interrupt context (cmdq_irq) for both successful
 * completions and error teardown.  Reads back the DCMD response,
 * re-arms the vendor "send status" trigger for DCMDs, drops the
 * runtime-PM reference taken at issue time, unwinds any crypto (ICE)
 * configuration, and finally notifies the block layer via mrq->done().
 *
 * NOTE(review): assumes mrq_slot[tag] holds a valid request — confirm
 * callers only pass tags with an outstanding mrq.
 */
static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
	struct mmc_request *mrq;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	int offset = 0;
	int err = 0;

	/* CQE v5 moved the vendor config register; apply the offset if so */
	if (cq_host->offset_changed)
		offset = CQ_V5_VENDOR_CFG;
	mrq = get_req_by_tag(cq_host, tag);
	/* DCMD (direct command) response is latched in CQCRDCT */
	if (tag == cq_host->dcmd_slot)
		mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);

	if (mrq->cmdq_req->cmdq_req_flags & DCMD)
		cmdq_writel(cq_host,
			cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
			CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);

	/* balances the runtime-PM get done when the request was issued */
	cmdq_runtime_pm_put(cq_host);

	/* tear down per-request crypto context; log but don't fail on error */
	if (cq_host->ops->crypto_cfg_end) {
		err = cq_host->ops->crypto_cfg_end(mmc, mrq);
		if (err) {
			pr_err("%s: failed to end ice config: err %d tag %d\n",
					mmc_hostname(mmc), err, tag);
		}
	}
	/* legacy reset path for hosts without the newer crypto capability */
	if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
			cq_host->ops->crypto_cfg_reset)
		cq_host->ops->crypto_cfg_reset(mmc, tag);
	mrq->done(mrq);
}
899
/*
 * cmdq_irq - CQE interrupt service routine.
 * @mmc: mmc host
 * @err: error code propagated by the low-level SDHCI interrupt handler
 *       (0 when the interrupt is a normal CQE event)
 *
 * Handles, in order:
 *  - error interrupts (controller error, CQIS_RED/GCE/ICCE): halts the
 *    CQE by polling, identifies the failing tag from CQTERRI (or from
 *    the doorbell when CQTERRI is unavailable), marks the request with
 *    the error and finishes it;
 *  - task completion (CQIS_TCC): clears CQTCN then completes every
 *    successfully finished tag;
 *  - halt acknowledge (CQIS_HAC): re-enables legacy interrupts and
 *    wakes the thread sleeping in cmdq_halt().
 *
 * Returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */
irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	unsigned long err_info = 0;
	struct mmc_request *mrq;
	int ret;
	u32 dbr_set = 0;
	u32 dev_pend_set = 0;
	int stat_err = 0;

	status = cmdq_readl(cq_host, CQIS);

	if (!status && !err)
		return IRQ_NONE;
	MMC_TRACE(mmc, "%s: CQIS: 0x%x err: %d\n",
		__func__, status, err);

	/* any of these status bits means the controller flagged an error */
	stat_err = status & (CQIS_RED | CQIS_GCE | CQIS_ICCE);

	if (err || stat_err) {
		err_info = cmdq_readl(cq_host, CQTERRI);
		pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
			mmc_hostname(mmc), err, status, err_info);

		/*
		 * Need to halt CQE in case of error in interrupt context itself
		 * otherwise CQE may proceed with sending CMD to device even if
		 * CQE/card is in error state.
		 * CMDQ error handling will make sure that it is unhalted after
		 * handling all the errors.
		 */
		ret = cmdq_halt_poll(mmc, true);
		if (ret)
			pr_err("%s: %s: halt failed ret=%d\n",
					mmc_hostname(mmc), __func__, ret);

		/*
		 * Clear the CQIS after halting incase of error. This is done
		 * because if CQIS is cleared before halting, the CQ will
		 * continue with issueing commands for rest of requests with
		 * Doorbell rung. This will overwrite the Resp Arg register.
		 * So CQ must be halted first and then CQIS cleared incase
		 * of error
		 */
		cmdq_writel(cq_host, status, CQIS);

		cmdq_dumpregs(cq_host);

		if (!err_info) {
			/*
			 * It may so happen sometimes for few errors(like ADMA)
			 * that HW cannot give CQTERRI info.
			 * Thus below is a HW WA for recovering from such
			 * scenario.
			 * - To halt/disable CQE and do reset_all.
			 *   Since there is no way to know which tag would
			 *   have caused such error, so check for any first
			 *   bit set in doorbell and proceed with an error.
			 */
			dbr_set = cmdq_readl(cq_host, CQTDBR);
			if (!dbr_set) {
				/* no task pending: nothing to recover */
				pr_err("%s: spurious/force error interrupt\n",
						mmc_hostname(mmc));
				cmdq_halt_poll(mmc, false);
				mmc_host_clr_halt(mmc);
				return IRQ_HANDLED;
			}

			/* blame the lowest pending tag (best effort) */
			tag = ffs(dbr_set) - 1;
			pr_err("%s: error tag selected: tag = %lu\n",
					mmc_hostname(mmc), tag);
			mrq = get_req_by_tag(cq_host, tag);
			if (mrq->data)
				mrq->data->error = err;
			else
				mrq->cmd->error = err;
			/*
			 * Get ADMA descriptor memory in case of ADMA
			 * error for debug.
			 */
			if (err == -EIO)
				cmdq_dump_adma_mem(cq_host);
			goto skip_cqterri;
		}

		if (err_info & CQ_RMEFV) {
			/* response-mode error: tag comes from the CMD field */
			tag = GET_CMD_ERR_TAG(err_info);
			pr_err("%s: CMD err tag: %lu\n", __func__, tag);

			mrq = get_req_by_tag(cq_host, tag);
			/* CMD44/45/46/47 will not have a valid cmd */
			if (mrq->cmd)
				mrq->cmd->error = err;
			else
				mrq->data->error = err;
		} else {
			/* otherwise it is a data-transfer error */
			tag = GET_DAT_ERR_TAG(err_info);
			pr_err("%s: Dat err tag: %lu\n", __func__, tag);
			mrq = get_req_by_tag(cq_host, tag);
			mrq->data->error = err;
		}

skip_cqterri:
		/*
		 * If CQE halt fails then, disable CQE
		 * from processing any further requests
		 */
		if (ret) {
			cmdq_disable_nosync(mmc, true);
			/*
			 * Enable legacy interrupts as CQE halt has failed.
			 * This is needed to send legacy commands like status
			 * cmd as part of error handling work.
			 */
			if (cq_host->ops->clear_set_irqs)
				cq_host->ops->clear_set_irqs(mmc, false);
		}

		/*
		 * CQE detected a response error from device
		 * In most cases, this would require a reset.
		 */
		if (stat_err & CQIS_RED) {
			/*
			 * will check if the RED error is due to a bkops
			 * exception once the queue is empty
			 */
			BUG_ON(!mmc->card);
			if (mmc_card_configured_manual_bkops(mmc->card) ||
			    mmc_card_configured_auto_bkops(mmc->card))
				mmc->card->bkops.needs_check = true;

			mrq->cmdq_req->resp_err = true;
			mmc->err_stats[MMC_ERR_CMDQ_RED]++;
			pr_err("%s: Response error (0x%08x) from card !!!",
				mmc_hostname(mmc), cmdq_readl(cq_host, CQCRA));

		} else {
			/* save device response for the error handler */
			mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
			mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
		}

		/*
		 * Generic Crypto error detected by CQE.
		 * Its a fatal, would require cmdq reset.
		 */
		if (stat_err & CQIS_GCE) {
			if (mrq->data)
				mrq->data->error = -EIO;
			mmc->err_stats[MMC_ERR_CMDQ_GCE]++;
			pr_err("%s: Crypto generic error while processing task %lu!",
				mmc_hostname(mmc), tag);
			MMC_TRACE(mmc, "%s: GCE error detected with tag %lu\n",
					__func__, tag);
		}
		/*
		 * Invalid crypto config error detected by CQE, clear the task.
		 * Task can be cleared only when CQE is halt state.
		 */
		if (stat_err & CQIS_ICCE) {
			/*
			 * Invalid Crypto Config Error is detected at the
			 * beginning of the transfer before the actual execution
			 * started. So just clear the task in CQE. No need to
			 * clear in device. Only the task which caused ICCE has
			 * to be cleared. Other tasks can be continue processing
			 * The first task which is about to be prepared would
			 * cause ICCE Error.
			 */
			dbr_set = cmdq_readl(cq_host, CQTDBR);
			dev_pend_set = cmdq_readl(cq_host, CQDPT);
			/* the tag rung but not device-pending caused the ICCE */
			if (dbr_set ^ dev_pend_set)
				tag = ffs(dbr_set ^ dev_pend_set) - 1;
			mrq = get_req_by_tag(cq_host, tag);
			mmc->err_stats[MMC_ERR_CMDQ_ICCE]++;
			pr_err("%s: Crypto config error while processing task %lu!",
				mmc_hostname(mmc), tag);
			MMC_TRACE(mmc, "%s: ICCE error with tag %lu\n",
					__func__, tag);
			if (mrq->data)
				mrq->data->error = -EIO;
			else if (mrq->cmd)
				mrq->cmd->error = -EIO;
			/*
			 * If CQE is halted and tag is valid then clear the task
			 * then un-halt CQE and set flag to skip error recovery.
			 * If any of the condtions is not met thene it will
			 * enter into default error recovery path.
			 */
			if (!ret && (dbr_set ^ dev_pend_set)) {
				ret = cmdq_clear_task_poll(cq_host, tag);
				if (ret) {
					pr_err("%s: %s: task[%lu] clear failed ret=%d\n",
						mmc_hostname(mmc),
						__func__, tag, ret);
				} else if (!cmdq_halt_poll(mmc, false)) {
					mrq->cmdq_req->skip_err_handling = true;
				}
			}
		}
		cmdq_finish_data(mmc, tag);
	} else {
		/* no error: acknowledge the status bits right away */
		cmdq_writel(cq_host, status, CQIS);
	}

	if (status & CQIS_TCC) {
		/* read CQTCN and complete the request */
		comp_status = cmdq_readl(cq_host, CQTCN);
		if (!comp_status)
			goto hac;
		/*
		 * The CQTCN must be cleared before notifying req completion
		 * to upper layers to avoid missing completion notification
		 * of new requests with the same tag.
		 */
		cmdq_writel(cq_host, comp_status, CQTCN);
		/*
		 * A write memory barrier is necessary to guarantee that CQTCN
		 * gets cleared first before next doorbell for the same tag is
		 * set but that is already achieved by the barrier present
		 * before setting doorbell, hence one is not needed here.
		 */
		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			mrq = get_req_by_tag(cq_host, tag);
			/* only complete tags that did not record an error */
			if (!((mrq->cmd && mrq->cmd->error) ||
			      mrq->cmdq_req->resp_err ||
			      (mrq->data && mrq->data->error))) {
				/* complete the corresponding mrq */
				pr_debug("%s: completing tag -> %lu\n",
					 mmc_hostname(mmc), tag);
				MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
					__func__, tag);
				cmdq_finish_data(mmc, tag);
			}
		}
	}
hac:
	if (status & CQIS_HAC) {
		if (cq_host->ops->post_cqe_halt)
			cq_host->ops->post_cqe_halt(mmc);
		/* halt done: re-enable legacy interrupts */
		if (cq_host->ops->clear_set_irqs)
			cq_host->ops->clear_set_irqs(mmc, false);
		/* halt is completed, wakeup waiting thread */
		complete(&cq_host->halt_comp);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cmdq_irq);
1152
/* cmdq_halt_poll - Halting CQE using polling method.
 * @mmc: struct mmc_host
 * @halt: bool halt
 * This is used mainly from interrupt context to halt/unhalt
 * CQE engine.
 *
 * Unlike cmdq_halt(), this never sleeps on the HAC completion: it busy
 * polls CQCTL (up to 100 x 5us) with the halt interrupt masked, so it is
 * safe to call from the IRQ handler.  Returns 0 on success, -ETIMEDOUT
 * if the HALT bit never latched.
 */
static int cmdq_halt_poll(struct mmc_host *mmc, bool halt)
{
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	int retries = 100;

	if (!halt) {
		/* un-halt: restore timeout/irqs, then clear the HALT bit */
		if (cq_host->ops->set_data_timeout)
			cq_host->ops->set_data_timeout(mmc, 0xf);
		if (cq_host->ops->clear_set_irqs)
			cq_host->ops->clear_set_irqs(mmc, true);
		cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
			    CQCTL);
		mmc_host_clr_halt(mmc);
		return 0;
	}

	/* mask HAC irq so the sleeping cmdq_halt() path is not woken */
	cmdq_set_halt_irq(cq_host, false);
	cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT, CQCTL);
	/* poll until the controller acknowledges HALT in CQCTL */
	while (retries) {
		if (!(cmdq_readl(cq_host, CQCTL) & HALT)) {
			udelay(5);
			retries--;
			continue;
		} else {
			if (cq_host->ops->post_cqe_halt)
				cq_host->ops->post_cqe_halt(mmc);
			/* halt done: re-enable legacy interrupts */
			if (cq_host->ops->clear_set_irqs)
				cq_host->ops->clear_set_irqs(mmc,
							false);
			mmc_host_set_halt(mmc);
			break;
		}
	}
	cmdq_set_halt_irq(cq_host, true);
	return retries ? 0 : -ETIMEDOUT;
}
1196
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301197/* May sleep */
1198static int cmdq_halt(struct mmc_host *mmc, bool halt)
1199{
1200 struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
Konstantin Dorfman4d40cf22015-06-11 11:41:53 +03001201 u32 ret = 0;
Veerabhadrarao Badigantiaa2b2d82017-02-20 13:51:13 +05301202 u32 config = 0;
Ritesh Harjani442c60a2015-09-15 19:21:32 +05301203 int retries = 3;
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301204
Konstantin Dorfman4d40cf22015-06-11 11:41:53 +03001205 cmdq_runtime_pm_get(cq_host);
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301206 if (halt) {
Ritesh Harjani442c60a2015-09-15 19:21:32 +05301207 while (retries) {
1208 cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT,
1209 CQCTL);
1210 ret = wait_for_completion_timeout(&cq_host->halt_comp,
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301211 msecs_to_jiffies(HALT_TIMEOUT_MS));
Veerabhadrarao Badigantiaa2b2d82017-02-20 13:51:13 +05301212 if (!ret) {
1213 pr_warn("%s: %s: HAC int timeout\n",
1214 mmc_hostname(mmc), __func__);
1215 if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
1216 /*
1217 * Don't retry if CQE is halted but irq
1218 * is not triggered in timeout period.
1219 * And since we are returning error,
1220 * un-halt CQE. Since irq was not fired
1221 * yet, no need to set other params
1222 */
1223 retries = 0;
1224 config = cmdq_readl(cq_host, CQCTL);
1225 config &= ~HALT;
1226 cmdq_writel(cq_host, config, CQCTL);
1227 } else {
1228 pr_warn("%s: %s: retryng halt (%d)\n",
1229 mmc_hostname(mmc), __func__,
1230 retries);
1231 retries--;
1232 continue;
1233 }
Ritesh Harjani442c60a2015-09-15 19:21:32 +05301234 } else {
Sayali Lokhande25cadc32016-11-30 10:01:59 +05301235 MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
1236 __func__, retries);
Ritesh Harjani442c60a2015-09-15 19:21:32 +05301237 break;
1238 }
1239 }
Subhash Jadavani6a718e12015-10-19 17:25:22 -07001240 ret = retries ? 0 : -ETIMEDOUT;
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301241 } else {
Sahitya Tummala87231ce2016-04-12 13:24:51 +05301242 if (cq_host->ops->set_transfer_params)
1243 cq_host->ops->set_transfer_params(mmc);
Sahitya Tummala2af92822016-04-05 13:38:12 +05301244 if (cq_host->ops->set_block_size)
1245 cq_host->ops->set_block_size(mmc);
Asutosh Das3f730d12015-07-08 11:41:35 +05301246 if (cq_host->ops->set_data_timeout)
1247 cq_host->ops->set_data_timeout(mmc, 0xf);
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301248 if (cq_host->ops->clear_set_irqs)
1249 cq_host->ops->clear_set_irqs(mmc, true);
Sayali Lokhande25cadc32016-11-30 10:01:59 +05301250 MMC_TRACE(mmc, "%s: unhalt done\n", __func__);
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301251 cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
1252 CQCTL);
1253 }
Konstantin Dorfman4d40cf22015-06-11 11:41:53 +03001254 cmdq_runtime_pm_put(cq_host);
1255 return ret;
Asutosh Dasaa1e1c72015-05-21 17:22:10 +05301256}
1257
Ritesh Harjani121cdec2015-09-27 21:51:01 +05301258static void cmdq_post_req(struct mmc_host *mmc, int tag, int err)
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -07001259{
Ritesh Harjani121cdec2015-09-27 21:51:01 +05301260 struct cmdq_host *cq_host;
1261 struct mmc_request *mrq;
1262 struct mmc_data *data;
1263 struct sdhci_host *sdhci_host = mmc_priv(mmc);
1264
1265 if (WARN_ON(!mmc))
1266 return;
1267
1268 cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
1269 mrq = get_req_by_tag(cq_host, tag);
1270 data = mrq->data;
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -07001271
1272 if (data) {
1273 data->error = err;
Ritesh Harjani121cdec2015-09-27 21:51:01 +05301274 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -07001275 (data->flags & MMC_DATA_READ) ?
1276 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1277 if (err)
1278 data->bytes_xfered = 0;
1279 else
1280 data->bytes_xfered = blk_rq_bytes(mrq->req);
Gilad Broner44445992015-09-29 16:05:39 +03001281
1282 /* we're in atomic context (soft-irq) so unvote async. */
1283 sdhci_msm_pm_qos_irq_unvote(sdhci_host, true);
1284 cmdq_pm_qos_unvote(sdhci_host, mrq);
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -07001285 }
1286}
1287
/*
 * cmdq_dumpstate - dump the CQE register state for debugging.
 * @mmc: mmc host
 *
 * Takes a runtime-PM reference around the register dump so the
 * controller is guaranteed to be powered while it is read.
 */
static void cmdq_dumpstate(struct mmc_host *mmc)
{
	struct cmdq_host *cq = (struct cmdq_host *)mmc_cmdq_private(mmc);

	cmdq_runtime_pm_get(cq);
	cmdq_dumpregs(cq);
	cmdq_runtime_pm_put(cq);
}
1295
Gilad Broner44445992015-09-29 16:05:39 +03001296static int cmdq_late_init(struct mmc_host *mmc)
1297{
1298 struct sdhci_host *host = mmc_priv(mmc);
1299 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1300 struct sdhci_msm_host *msm_host = pltfm_host->priv;
1301
1302 /*
1303 * TODO: This should basically move to something like "sdhci-cmdq-msm"
1304 * for msm specific implementation.
1305 */
1306 sdhci_msm_pm_qos_irq_init(host);
1307
1308 if (msm_host->pdata->pm_qos_data.cmdq_valid)
1309 sdhci_msm_pm_qos_cpu_init(host,
1310 msm_host->pdata->pm_qos_data.cmdq_latency);
1311 return 0;
1312}
1313
/*
 * Host operations published to the MMC core for CMDQ mode.
 * NOTE(review): enable/disable/request/reset are defined earlier in this
 * file (outside this view); the remaining handlers are defined above.
 */
static const struct mmc_cmdq_host_ops cmdq_host_ops = {
	.init = cmdq_late_init,
	.enable = cmdq_enable,
	.disable = cmdq_disable,
	.request = cmdq_request,
	.post_req = cmdq_post_req,
	.halt = cmdq_halt,
	.reset = cmdq_reset,
	.dumpstate = cmdq_dumpstate,
};
1324
1325struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev)
1326{
1327 struct cmdq_host *cq_host;
1328 struct resource *cmdq_memres = NULL;
1329
1330 /* check and setup CMDQ interface */
1331 cmdq_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1332 "cmdq_mem");
1333 if (!cmdq_memres) {
1334 dev_dbg(&pdev->dev, "CMDQ not supported\n");
1335 return ERR_PTR(-EINVAL);
1336 }
1337
1338 cq_host = kzalloc(sizeof(*cq_host), GFP_KERNEL);
1339 if (!cq_host) {
1340 dev_err(&pdev->dev, "failed to allocate memory for CMDQ\n");
1341 return ERR_PTR(-ENOMEM);
1342 }
1343 cq_host->mmio = devm_ioremap(&pdev->dev,
1344 cmdq_memres->start,
1345 resource_size(cmdq_memres));
1346 if (!cq_host->mmio) {
1347 dev_err(&pdev->dev, "failed to remap cmdq regs\n");
1348 kfree(cq_host);
1349 return ERR_PTR(-EBUSY);
1350 }
1351 dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
1352
1353 return cq_host;
1354}
1355EXPORT_SYMBOL(cmdq_pltfm_init);
1356
1357int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
1358 bool dma64)
1359{
1360 int err = 0;
1361
1362 cq_host->dma64 = dma64;
1363 cq_host->mmc = mmc;
1364 cq_host->mmc->cmdq_private = cq_host;
1365
1366 cq_host->num_slots = NUM_SLOTS;
1367 cq_host->dcmd_slot = DCMD_SLOT;
1368
1369 mmc->cmdq_ops = &cmdq_host_ops;
Ritesh Harjanib0280c22015-10-27 11:51:25 +05301370 mmc->num_cq_slots = NUM_SLOTS;
1371 mmc->dcmd_cq_slot = DCMD_SLOT;
Venkat Gopalakrishnan0225ff92015-05-29 17:25:46 -07001372
1373 cq_host->mrq_slot = kzalloc(sizeof(cq_host->mrq_slot) *
1374 cq_host->num_slots, GFP_KERNEL);
1375 if (!cq_host->mrq_slot)
1376 return -ENOMEM;
1377
1378 init_completion(&cq_host->halt_comp);
1379 return err;
1380}
1381EXPORT_SYMBOL(cmdq_init);