/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/blkdev.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

#include "cmdq_hci.h"
#include "sdhci.h"
#include "sdhci-msm.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

/* 1 sec */
#define HALT_TIMEOUT_MS 1000

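/*
 * Runtime PM helpers: hold a runtime PM reference on the parent device
 * (the host controller) around any CQE register access. When
 * CONFIG_PM_RUNTIME is disabled these collapse to no-ops.
 */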
#ifdef CONFIG_PM_RUNTIME
static int cmdq_runtime_pm_get(struct cmdq_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}
static int cmdq_runtime_pm_put(struct cmdq_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}
#else
static inline int cmdq_runtime_pm_get(struct cmdq_host *host)
{
	return 0;
}
static inline int cmdq_runtime_pm_put(struct cmdq_host *host)
{
	return 0;
}
#endif
static inline struct mmc_request *get_req_by_tag(struct cmdq_host *cq_host,
						 unsigned int tag)
{
	return cq_host->mrq_slot[tag];
}

static inline u8 *get_desc(struct cmdq_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cmdq_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cmdq_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cmdq_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

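/*
 * Initialise the link descriptor of a slot so that it points at the
 * slot's transfer-descriptor region. The DCMD slot carries no data and
 * therefore gets an end-of-chain link descriptor instead.
 */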
static void setup_trans_desc(struct cmdq_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT) {
		*link_temp = VALID(0) | ACT(0) | END(1);
		return;
	}

	*link_temp = VALID(1) | ACT(0x6) | END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);
		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);
		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

static void cmdq_clear_set_irqs(struct cmdq_host *cq_host, u32 clear, u32 set)
{
	u32 ier;

	ier = cmdq_readl(cq_host, CQISTE);
	ier &= ~clear;
	ier |= set;
	cmdq_writel(cq_host, ier, CQISTE);
	cmdq_writel(cq_host, ier, CQISGE);
	/* ensure the writes are done */
	mb();
}


#define DRV_NAME "cmdq-host"

static void cmdq_dump_task_history(struct cmdq_host *cq_host)
{
	int i;

	if (likely(!cq_host->mmc->cmdq_thist_enabled))
		return;

	if (!cq_host->thist) {
		pr_err("%s: %s: CMDQ task history buffer not allocated\n",
			mmc_hostname(cq_host->mmc), __func__);
		return;
	}

	pr_err("---- Circular Task History ----\n");
	pr_err(DRV_NAME ": Last entry index: %d", cq_host->thist_idx - 1);

	for (i = 0; i < cq_host->num_slots; i++) {
		pr_err(DRV_NAME ": [%02d]%s Task: 0x%08x | Args: 0x%08x\n", i,
			(cq_host->thist[i].is_dcmd) ? "DCMD" : "DATA",
			lower_32_bits(cq_host->thist[i].task),
			upper_32_bits(cq_host->thist[i].task));
	}
	pr_err("-------------------------\n");
}

static void cmdq_dumpregs(struct cmdq_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	pr_err(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
		mmc_hostname(mmc));

	pr_err(DRV_NAME ": Caps: 0x%08x | Version: 0x%08x\n",
		cmdq_readl(cq_host, CQCAP),
		cmdq_readl(cq_host, CQVER));
	pr_err(DRV_NAME ": Queuing config: 0x%08x | Queue Ctrl: 0x%08x\n",
		cmdq_readl(cq_host, CQCFG),
		cmdq_readl(cq_host, CQCTL));
	pr_err(DRV_NAME ": Int stat: 0x%08x | Int enab: 0x%08x\n",
		cmdq_readl(cq_host, CQIS),
		cmdq_readl(cq_host, CQISTE));
	pr_err(DRV_NAME ": Int sig: 0x%08x | Int Coal: 0x%08x\n",
		cmdq_readl(cq_host, CQISGE),
		cmdq_readl(cq_host, CQIC));
	pr_err(DRV_NAME ": TDL base: 0x%08x | TDL up32: 0x%08x\n",
		cmdq_readl(cq_host, CQTDLBA),
		cmdq_readl(cq_host, CQTDLBAU));
	pr_err(DRV_NAME ": Doorbell: 0x%08x | Comp Notif: 0x%08x\n",
		cmdq_readl(cq_host, CQTDBR),
		cmdq_readl(cq_host, CQTCN));
	pr_err(DRV_NAME ": Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		cmdq_readl(cq_host, CQDQS),
		cmdq_readl(cq_host, CQDPT));
	pr_err(DRV_NAME ": Task clr: 0x%08x | Send stat 1: 0x%08x\n",
		cmdq_readl(cq_host, CQTCLR),
		cmdq_readl(cq_host, CQSSC1));
	pr_err(DRV_NAME ": Send stat 2: 0x%08x | DCMD resp: 0x%08x\n",
		cmdq_readl(cq_host, CQSSC2),
		cmdq_readl(cq_host, CQCRDCT));
	pr_err(DRV_NAME ": Resp err mask: 0x%08x | Task err: 0x%08x\n",
		cmdq_readl(cq_host, CQRMEM),
		cmdq_readl(cq_host, CQTERRI));
	pr_err(DRV_NAME ": Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		cmdq_readl(cq_host, CQCRI),
		cmdq_readl(cq_host, CQCRA));
	pr_err(DRV_NAME ": Vendor cfg: 0x%08x\n",
		cmdq_readl(cq_host, CQ_VENDOR_CFG));
	pr_err(DRV_NAME ": ===========================================\n");

	cmdq_dump_task_history(cq_host);
	if (cq_host->ops->dump_vendor_regs)
		cq_host->ops->dump_vendor_regs(mmc);
}

/**
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|     .
 *      .           .
 *  no. of slots    max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
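/*
 * Sizing example (the values follow directly from the code below): with
 * 128-bit task descriptors and 64-bit DMA, task_desc_len = 16 and
 * link_desc_len = 16, so slot_sz = 32 bytes and the task descriptor list
 * occupies 32 * NUM_SLOTS = 1024 bytes; the transfer-descriptor pool adds
 * trans_desc_len * max_segs bytes for each of the 31 data slots (the DCMD
 * slot needs none).
 */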
static int cmdq_host_alloc_tdl(struct cmdq_host *cq_host)
{

	size_t desc_size;
	size_t data_size;
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CMDQ_TASK_DESC_SZ_128) {
		cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) |
			    CQ_TASK_DESC_SZ, CQCFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CMDQ_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	desc_size = cq_host->slot_sz * cq_host->num_slots;

	data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		(cq_host->num_slots - 1);

	pr_info("%s: desc_size: %d data_sz: %d slot-sz: %d\n", __func__,
		(int)desc_size, (int)data_size, cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						data_size,
						&cq_host->trans_desc_dma_base,
						GFP_KERNEL);
	cq_host->thist = devm_kzalloc(mmc_dev(cq_host->mmc),
					(sizeof(*cq_host->thist) *
						cq_host->num_slots),
					GFP_KERNEL);
	if (!cq_host->desc_base || !cq_host->trans_desc_base)
		return -ENOMEM;

	pr_info("desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		cq_host->desc_base, cq_host->trans_desc_base,
		(unsigned long long)cq_host->desc_dma_base,
		(unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

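/*
 * Bring up the CQE: program the descriptor sizes and DCMD support in
 * CQCFG, allocate the descriptor lists on first use, point CQTDLBA/
 * CQTDLBAU at the task descriptor list, switch to CMDQ interrupts and
 * program the card RCA (CQSSC2) and the QSR polling interval (CQSSC1).
 */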
static int cmdq_enable(struct mmc_host *mmc)
{
	int err = 0;
	u32 cqcfg;
	bool dcmd_enable;
	struct cmdq_host *cq_host = mmc_cmdq_private(mmc);

	if (!cq_host || !mmc->card || !mmc_card_cmdq(mmc->card)) {
		err = -EINVAL;
		goto out;
	}

	if (cq_host->enabled)
		goto out;

	cmdq_runtime_pm_get(cq_host);
	cqcfg = cmdq_readl(cq_host, CQCFG);
	if (cqcfg & 0x1) {
		pr_info("%s: %s: cq_host is already enabled\n",
			mmc_hostname(mmc), __func__);
		WARN_ON(1);
		goto pm_ref_count;
	}

	if (cq_host->quirks & CMDQ_QUIRK_NO_DCMD)
		dcmd_enable = false;
	else
		dcmd_enable = true;

	cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
		 (dcmd_enable ? CQ_DCMD : 0));

	cmdq_writel(cq_host, cqcfg, CQCFG);
	/* enable CQ_HOST */
	cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
		    CQCFG);

	if (!cq_host->desc_base ||
	    !cq_host->trans_desc_base) {
		err = cmdq_host_alloc_tdl(cq_host);
		if (err)
			goto pm_ref_count;
	}

	cmdq_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), CQTDLBA);
	cmdq_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), CQTDLBAU);

	/*
	 * disable all vendor interrupts
	 * enable CMDQ interrupts
	 * enable the vendor error interrupts
	 */
	if (cq_host->ops->clear_set_irqs)
		cq_host->ops->clear_set_irqs(mmc, true);

	cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);

	/* cq_host would use this rca to address the card */
	cmdq_writel(cq_host, mmc->card->rca, CQSSC2);

	/* send QSR at lesser intervals than the default */
	cmdq_writel(cq_host, SEND_QSR_INTERVAL, CQSSC1);

	/* enable bkops exception indication */
	if (mmc_card_configured_manual_bkops(mmc->card) &&
	    !mmc_card_configured_auto_bkops(mmc->card))
		cmdq_writel(cq_host, cmdq_readl(cq_host, CQRMEM) | CQ_EXCEPTION,
			    CQRMEM);

	/* ensure the writes are done before enabling CQE */
	mb();

	cq_host->enabled = true;

	if (cq_host->ops->set_block_size)
		cq_host->ops->set_block_size(cq_host->mmc);

	if (cq_host->ops->set_data_timeout)
		cq_host->ops->set_data_timeout(mmc, 0xf);

	if (cq_host->ops->clear_set_dumpregs)
		cq_host->ops->clear_set_dumpregs(mmc, 1);

	if (cq_host->ops->enhanced_strobe_mask)
		cq_host->ops->enhanced_strobe_mask(mmc, true);

pm_ref_count:
	cmdq_runtime_pm_put(cq_host);
out:
	return err;
}

static void cmdq_disable(struct mmc_host *mmc, bool soft)
{
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);

	cmdq_runtime_pm_get(cq_host);
	if (soft) {
		cmdq_writel(cq_host, cmdq_readl(
				cq_host, CQCFG) & ~(CQ_ENABLE),
			    CQCFG);
	}
	if (cq_host->ops->enhanced_strobe_mask)
		cq_host->ops->enhanced_strobe_mask(mmc, false);

	cmdq_runtime_pm_put(cq_host);
	cq_host->enabled = false;
}

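/*
 * Reset the CQE while preserving its configuration: CQCFG, CQTDLBA,
 * CQTDLBAU and CQSSC2 are saved, the controller is disabled and reset
 * through the vendor hook, and the saved values are restored before
 * marking the host enabled again.
 */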
static void cmdq_reset(struct mmc_host *mmc, bool soft)
{
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	unsigned int cqcfg;
	unsigned int tdlba;
	unsigned int tdlbau;
	unsigned int rca;
	int ret;

	cmdq_runtime_pm_get(cq_host);
	cqcfg = cmdq_readl(cq_host, CQCFG);
	tdlba = cmdq_readl(cq_host, CQTDLBA);
	tdlbau = cmdq_readl(cq_host, CQTDLBAU);
	rca = cmdq_readl(cq_host, CQSSC2);

	cmdq_disable(mmc, true);

	if (cq_host->ops->reset) {
		ret = cq_host->ops->reset(mmc);
		if (ret) {
			pr_crit("%s: reset CMDQ controller: failed\n",
				mmc_hostname(mmc));
			BUG();
		}
	}

	cmdq_writel(cq_host, tdlba, CQTDLBA);
	cmdq_writel(cq_host, tdlbau, CQTDLBAU);

	if (cq_host->ops->clear_set_irqs)
		cq_host->ops->clear_set_irqs(mmc, true);

	cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);

	/* cq_host would use this rca to address the card */
	cmdq_writel(cq_host, rca, CQSSC2);

	/* ensure the writes are done before enabling CQE */
	mb();

	cmdq_writel(cq_host, cqcfg, CQCFG);
	cmdq_runtime_pm_put(cq_host);
	cq_host->enabled = true;
}

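/*
 * Build the 64-bit task descriptor attributes for a data request from
 * the request flags: direction, priority, data tag, context id,
 * reliable write, block count and block address, with ACT = 0x5 marking
 * it as a task descriptor.
 */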
static void cmdq_prep_task_desc(struct mmc_request *mrq,
				u64 *data, bool intr, bool qbr)
{
	struct mmc_cmdq_req *cmdq_req = mrq->cmdq_req;
	u32 req_flags = cmdq_req->cmdq_req_flags;

	pr_debug("%s: %s: data-tag: 0x%08x - dir: %d - prio: %d - cnt: 0x%08x - addr: 0x%llx\n",
		 mmc_hostname(mrq->host), __func__,
		 !!(req_flags & DAT_TAG), !!(req_flags & DIR),
		 !!(req_flags & PRIO), cmdq_req->data.blocks,
		 (u64)mrq->cmdq_req->blk_addr);

	*data = VALID(1) |
		END(1) |
		INT(intr) |
		ACT(0x5) |
		FORCED_PROG(!!(req_flags & FORCED_PRG)) |
		CONTEXT(mrq->cmdq_req->ctx_id) |
		DATA_TAG(!!(req_flags & DAT_TAG)) |
		DATA_DIR(!!(req_flags & DIR)) |
		PRIORITY(!!(req_flags & PRIO)) |
		QBAR(qbr) |
		REL_WRITE(!!(req_flags & REL_WR)) |
		BLK_COUNT(mrq->cmdq_req->data.blocks) |
		BLK_ADDR((u64)mrq->cmdq_req->blk_addr);
}

static int cmdq_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

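/*
 * Fill one transfer descriptor: a 32-bit attribute word (ACT = 0x4,
 * data length and end-of-chain marker) followed by a 32- or 64-bit data
 * buffer address, depending on whether the host uses 64-bit DMA.
 */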
static void cmdq_set_tran_desc(u8 *desc, dma_addr_t addr, int len,
			       bool end, bool is_dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (VALID(1) |
		 END(end ? 1 : 0) |
		 INT(0) |
		 ACT(0x4) |
		 DAT_LENGTH(len));

	if (is_dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cmdq_prep_tran_desc(struct mmc_request *mrq,
			       struct cmdq_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cmdq_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
			mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);
	memset(desc, 0, cq_host->trans_desc_len * cq_host->mmc->max_segs);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cmdq_set_tran_desc(desc, addr, len, end, cq_host->dma64);
		desc += cq_host->trans_desc_len;
	}

	pr_debug("%s: req: 0x%p tag: %d calc_trans_des: 0x%p sg-cnt: %d\n",
		__func__, mrq->req, tag, desc, sg_count);

	return 0;
}

static void cmdq_log_task_desc_history(struct cmdq_host *cq_host, u64 task,
				       bool is_dcmd)
{
	if (likely(!cq_host->mmc->cmdq_thist_enabled))
		return;

	if (!cq_host->thist) {
		pr_err("%s: %s: CMDQ task history buffer not allocated\n",
			mmc_hostname(cq_host->mmc), __func__);
		return;
	}

	if (cq_host->thist_idx >= cq_host->num_slots)
		cq_host->thist_idx = 0;

	cq_host->thist[cq_host->thist_idx].is_dcmd = is_dcmd;
	memcpy(&cq_host->thist[cq_host->thist_idx++].task,
		&task, cq_host->task_desc_len);
}

static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_BUSY) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (VALID(1) |
		 END(1) |
		 INT(1) |
		 QBAR(1) |
		 ACT(0x5) |
		 CMD_INDEX(mrq->cmd->opcode) |
		 CMD_TIMING(timing) | RESP_TYPE(resp_type));
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("cmdq: dcmd: cmd: %d timing: %d resp: %d\n",
		mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
	cmdq_log_task_desc_history(cq_host, *task_desc, true);
}

static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	sdhci_msm_pm_qos_cpu_vote(host,
		msm_host->pdata->pm_qos_data.cmdq_latency, mrq->req->cpu);
}

static void cmdq_pm_qos_unvote(struct sdhci_host *host, struct mmc_request *mrq)
{
	/* use async as we're inside an atomic context (soft-irq) */
	sdhci_msm_pm_qos_cpu_unvote(host, mrq->req->cpu, true);
}

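/*
 * Issue one request: DCMDs get their task descriptor built in the fixed
 * DCMD slot, data requests get a task descriptor plus a chain of
 * transfer descriptors for the scatterlist. In both cases the request
 * is recorded in mrq_slot[] and the doorbell bit for the tag is written
 * to CQTDBR after a write barrier.
 */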
static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	u32 tag = mrq->cmdq_req->tag;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	struct sdhci_host *host = mmc_priv(mmc);

	if (!cq_host->enabled) {
		pr_err("%s: CMDQ host not enabled yet !!!\n",
		       mmc_hostname(mmc));
		err = -EINVAL;
		goto out;
	}

	cmdq_runtime_pm_get(cq_host);

	if (mrq->cmdq_req->cmdq_req_flags & DCMD) {
		cmdq_prep_dcmd_desc(mmc, mrq);
		cq_host->mrq_slot[DCMD_SLOT] = mrq;
		/* DCMD's are always issued on a fixed slot */
		tag = DCMD_SLOT;
		goto ring_doorbell;
	}

	task_desc = (__le64 __force *)get_desc(cq_host, tag);

	cmdq_prep_task_desc(mrq, &data, 1,
			    (mrq->cmdq_req->cmdq_req_flags & QBR));
	*task_desc = cpu_to_le64(data);
	cmdq_log_task_desc_history(cq_host, *task_desc, false);

	err = cmdq_prep_tran_desc(mrq, cq_host, tag);
	if (err) {
		pr_err("%s: %s: failed to setup tx desc: %d\n",
		       mmc_hostname(mmc), __func__, err);
		goto out;
	}

	cq_host->mrq_slot[tag] = mrq;
	if (cq_host->ops->set_tranfer_params)
		cq_host->ops->set_tranfer_params(mmc);

	/* PM QoS */
	sdhci_msm_pm_qos_irq_vote(host);
	cmdq_pm_qos_vote(host, mrq);
ring_doorbell:
	/* Ensure the task descriptor list is flushed before ringing doorbell */
	wmb();
	if (cmdq_readl(cq_host, CQTDBR) & (1 << tag)) {
		cmdq_dumpregs(cq_host);
		BUG_ON(1);
	}
	cmdq_writel(cq_host, 1 << tag, CQTDBR);
	/* Commit the doorbell write immediately */
	wmb();

out:
	return err;
}

static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
	struct mmc_request *mrq;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);

	mrq = get_req_by_tag(cq_host, tag);
	if (tag == cq_host->dcmd_slot)
		mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);

	if (mrq->cmdq_req->cmdq_req_flags & DCMD)
		cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
			    CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);

	cmdq_runtime_pm_put(cq_host);
	mrq->done(mrq);
}

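/*
 * Interrupt handler: CQIS is read and acknowledged first. A driver
 * error or a response error (CQIS_RED) is resolved to a tag via CQTERRI
 * and the offending request is completed with an error; normal
 * completions are harvested from CQTCN, which is cleared before the
 * upper layers are notified; CQIS_HAC wakes the waiter in cmdq_halt().
 */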
irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	unsigned long err_info = 0;
	struct mmc_request *mrq;

	status = cmdq_readl(cq_host, CQIS);
	cmdq_writel(cq_host, status, CQIS);

	if (!status && !err)
		return IRQ_NONE;

	if (err || (status & CQIS_RED)) {
		err_info = cmdq_readl(cq_host, CQTERRI);
		pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
		       mmc_hostname(mmc), err, status, err_info);

		cmdq_dumpregs(cq_host);

		if (err_info & CQ_RMEFV) {
			tag = GET_CMD_ERR_TAG(err_info);
			pr_err("%s: CMD err tag: %lu\n", __func__, tag);

			mrq = get_req_by_tag(cq_host, tag);
			/* CMD44/45/46/47 will not have a valid cmd */
			if (mrq->cmd)
				mrq->cmd->error = err;
			else
				mrq->data->error = err;
		} else {
			tag = GET_DAT_ERR_TAG(err_info);
			pr_err("%s: Dat err tag: %lu\n", __func__, tag);
			mrq = get_req_by_tag(cq_host, tag);
			mrq->data->error = err;
		}

		/*
		 * CQE detected a response error from device
		 * In most cases, this would require a reset.
		 */
		if (status & CQIS_RED) {
			/*
			 * will check if the RED error is due to a bkops
			 * exception once the queue is empty
			 */
			BUG_ON(!mmc->card);
			if (mmc_card_configured_manual_bkops(mmc->card) &&
			    !mmc_card_configured_auto_bkops(mmc->card))
				mmc->card->bkops.needs_check = true;

			mrq->cmdq_req->resp_err = true;
			pr_err("%s: Response error (0x%08x) from card !!!",
				mmc_hostname(mmc), status);
		} else {
			mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
			mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
		}

		mmc->err_mrq = mrq;
		cmdq_finish_data(mmc, tag);
	}

	if (status & CQIS_TCC) {
		/* read CQTCN and complete the request */
		comp_status = cmdq_readl(cq_host, CQTCN);
		if (!comp_status)
			goto out;
		/*
		 * The CQTCN must be cleared before notifying req completion
		 * to upper layers to avoid missing completion notification
		 * of new requests with the same tag.
		 */
		cmdq_writel(cq_host, comp_status, CQTCN);
		/*
		 * A write memory barrier is necessary to guarantee that CQTCN
		 * gets cleared first before next doorbell for the same tag is
		 * set but that is already achieved by the barrier present
		 * before setting doorbell, hence one is not needed here.
		 */
		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: completing tag -> %lu\n",
				 mmc_hostname(mmc), tag);
			cmdq_finish_data(mmc, tag);
		}
	}

	if (status & CQIS_HAC) {
		if (cq_host->ops->post_cqe_halt)
			cq_host->ops->post_cqe_halt(mmc);
		/* halt is completed, wakeup waiting thread */
		complete(&cq_host->halt_comp);
	}

out:
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cmdq_irq);

/* May sleep */
static int cmdq_halt(struct mmc_host *mmc, bool halt)
{
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	u32 ret = 0;
	int retries = 3;

	cmdq_runtime_pm_get(cq_host);
	if (halt) {
		while (retries) {
			cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT,
				    CQCTL);
			ret = wait_for_completion_timeout(&cq_host->halt_comp,
					msecs_to_jiffies(HALT_TIMEOUT_MS));
			if (!ret && !(cmdq_readl(cq_host, CQCTL) & HALT)) {
				retries--;
				continue;
			} else {
				/* halt done: re-enable legacy interrupts */
				if (cq_host->ops->clear_set_irqs)
					cq_host->ops->clear_set_irqs(mmc,
								     false);
				break;
			}
		}
		ret = retries ? 0 : -ETIMEDOUT;
	} else {
		if (cq_host->ops->set_data_timeout)
			cq_host->ops->set_data_timeout(mmc, 0xf);
		if (cq_host->ops->clear_set_irqs)
			cq_host->ops->clear_set_irqs(mmc, true);
		cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
			    CQCTL);
	}
	cmdq_runtime_pm_put(cq_host);
	return ret;
}

static void cmdq_post_req(struct mmc_host *host, struct mmc_request *mrq,
			  int err)
{
	struct mmc_data *data = mrq->data;
	struct sdhci_host *sdhci_host = mmc_priv(host);

	if (data) {
		data->error = err;
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (err)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = blk_rq_bytes(mrq->req);

		/* we're in atomic context (soft-irq) so unvote async. */
		sdhci_msm_pm_qos_irq_unvote(sdhci_host, true);
		cmdq_pm_qos_unvote(sdhci_host, mrq);
	}
}

static void cmdq_dumpstate(struct mmc_host *mmc)
{
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);

	cmdq_runtime_pm_get(cq_host);
	cmdq_dumpregs(cq_host);
	cmdq_runtime_pm_put(cq_host);
}

static int cmdq_late_init(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	/*
	 * TODO: This should basically move to something like "sdhci-cmdq-msm"
	 * for msm specific implementation.
	 */
	sdhci_msm_pm_qos_irq_init(host);

	if (msm_host->pdata->pm_qos_data.cmdq_valid)
		sdhci_msm_pm_qos_cpu_init(host,
			msm_host->pdata->pm_qos_data.cmdq_latency);
	return 0;
}

static const struct mmc_cmdq_host_ops cmdq_host_ops = {
	.init = cmdq_late_init,
	.enable = cmdq_enable,
	.disable = cmdq_disable,
	.request = cmdq_request,
	.post_req = cmdq_post_req,
	.halt = cmdq_halt,
	.reset = cmdq_reset,
	.dumpstate = cmdq_dumpstate,
};

struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev)
{
	struct cmdq_host *cq_host;
	struct resource *cmdq_memres = NULL;

	/* check and setup CMDQ interface */
	cmdq_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "cmdq_mem");
	if (!cmdq_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = kzalloc(sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		dev_err(&pdev->dev, "failed to allocate memory for CMDQ\n");
		return ERR_PTR(-ENOMEM);
	}
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cmdq_memres->start,
				     resource_size(cmdq_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cmdq regs\n");
		kfree(cq_host);
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cmdq_pltfm_init);

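/*
 * Late initialisation once the mmc_host exists: wire up cmdq_host_ops,
 * record the slot geometry and allocate the per-tag request table used
 * to map completions back to their mmc_request.
 */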
int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
	      bool dma64)
{
	int err = 0;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cmdq_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cmdq_ops = &cmdq_host_ops;

	cq_host->mrq_slot = kzalloc(sizeof(cq_host->mrq_slot) *
				    cq_host->num_slots, GFP_KERNEL);
	if (!cq_host->mrq_slot)
		return -ENOMEM;

	init_completion(&cq_host->halt_comp);
	return err;
}
EXPORT_SYMBOL(cmdq_init);
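
/*
 * Typical usage, sketched from the exported interface above rather than
 * copied from a specific caller (the real call sites live in the
 * platform host driver, e.g. sdhci-msm):
 *
 *	cq_host = cmdq_pltfm_init(pdev);
 *	if (!IS_ERR(cq_host))
 *		err = cmdq_init(cq_host, mmc, dma64);
 *
 * After cmdq_init() succeeds, the MMC core drives the controller through
 * mmc->cmdq_ops (cmdq_enable, cmdq_request, cmdq_halt, ...) and the
 * platform's interrupt handler forwards CQE interrupts to cmdq_irq().
 */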