/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "remoteproc_internal.h"
#include "qcom_mdt_loader.h"

#define MPSS_FIRMWARE_NAME		"modem.mdt"

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res proxy_supply[4];
	struct qcom_mss_reg_res active_supply[2];
	char **proxy_clk_names;
	char **active_clk_names;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct clk *active_clks[8];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;
	struct completion stop_done;
	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;
};

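/*
 * Look up the regulators named in @reg_res and record the voltage and
 * load requests to be applied when they are enabled. Returns the number
 * of regulators found or a negative errno.
 */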
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/*
	 * The regulator at index i was never enabled; drop any voltage and
	 * load requests made for it, then fully unwind the ones before it.
	 */
	if (regs[i].uV > 0)
		regulator_set_voltage(regs[i].reg, 0, INT_MAX);

	if (regs[i].uA > 0)
		regulator_set_load(regs[i].reg, 0);

	for (i--; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

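/*
 * The rproc "firmware" for this driver is the MBA image, the small boot
 * image that later authenticates the modem (MPSS) firmware; loading it is
 * a plain copy into the reserved MBA memory region.
 */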
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = qcom_mdt_find_rsc_table,
	.load = q6v5_load,
};

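/*
 * Poll RMB_PBL_STATUS_REG until the PBL reports a result or @ms
 * milliseconds have elapsed. Returns the status value or -ETIMEDOUT.
 */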
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

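/*
 * Poll RMB_MBA_STATUS_REG until it reports @status (any non-zero value if
 * @status is 0), a negative error value, or the @ms timeout expires.
 */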
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

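/*
 * Power up and release the Hexagon core: assert the resets, enable the
 * power block head switch, turn on the QDSP6 memories bank by bank,
 * remove the IO clamp, deassert the core reset, enable the core clock
 * and let the core run, then wait for the PBL to report its status.
 */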
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;

	/* Assert resets, stop core */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

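/*
 * Ask the bus logic described by @offset within @halt_map to halt its AXI
 * port and wait for the acknowledgment. The port remains halted until the
 * subsystem is reset.
 */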
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

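/*
 * Pass the MPSS metadata (the mdt header) to the MBA for authentication:
 * copy it into a physically contiguous buffer, hand its address over
 * through the RMB registers and wait for the authentication verdict.
 */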
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}

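/*
 * Report the amount of loaded MPSS firmware to the MBA by walking the
 * loadable, non-hash program headers and growing RMB_PMI_CODE_LENGTH_REG,
 * then wait for the MBA to finish authenticating the image.
 */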
static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	struct elf32_hdr *ehdr;
	phys_addr_t boot_addr;
	phys_addr_t fw_addr;
	bool relocate;
	size_t size;
	int ret;
	int i;

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		return ret;
	}

	if (relocate)
		boot_addr = qproc->mpss_phys;
	else
		boot_addr = fw_addr;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (phdr->p_type != PT_LOAD)
			continue;

		if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
			continue;

		if (!phdr->p_memsz)
			continue;

		size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!size) {
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}

		size += phdr->p_memsz;
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

	return ret < 0 ? ret : 0;
}

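/*
 * Load the modem (MPSS) firmware: parse the mdt header, let the MBA
 * authenticate it, load the segments into the mpss region and wait for
 * the final authentication result.
 */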
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct firmware *fw;
	phys_addr_t fw_addr;
	bool relocate;
	int ret;

	ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n");
		return ret;
	}

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		goto release_firmware;
	}

	if (relocate)
		qproc->mpss_reloc = fw_addr;

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME);
	if (ret)
		goto release_firmware;

	ret = q6v5_mpss_validate(qproc, fw);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}

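/*
 * Boot the Hexagon subsystem: enable the proxy and active resources,
 * release the MSS reset, point the RMB at the MBA image and bring the
 * core out of reset, then load the modem firmware and wait for the
 * handover interrupt, after which the proxy resources are dropped again.
 */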
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	}

	qproc->running = true;

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}

static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}

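/*
 * Translate a device address within the (possibly relocated) MPSS region
 * into a kernel virtual address for the remoteproc core.
 */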
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};

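/*
 * Watchdog bite handler. During a requested stop this doubles as the stop
 * acknowledgment; otherwise the crash reason is read from SMEM and the
 * remoteproc core is asked to recover the modem.
 */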
static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "fatal error received: %s\n", msg);
	else
		dev_err(qproc->dev, "fatal error without message\n");

	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);
	return IRQ_HANDLED;
}

static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);
	return IRQ_HANDLED;
}

static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}

static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

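/*
 * Resolve the "mba" and "mpss" memory-region phandles from the device
 * tree and map both regions so firmware can be loaded into them.
 */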
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}

static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);
	rproc_free(qproc->rproc);

	return 0;
}

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
};

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");