/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <trace/events/trace_msm_pil_event.h>

#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "pil-msa.h"

/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB			0x010
#define QDSP6SS_DBG_CFG			0x018

/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE		0x180
#define MSS_MODEM_HALT_BASE		0x200
#define MSS_NC_HALT_BASE		0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS		0x1
#define STATUS_XPU_UNLOCKED		0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE			0x00
#define RMB_PBL_STATUS			0x04
#define RMB_MBA_COMMAND			0x08
#define RMB_MBA_STATUS			0x0C
#define RMB_PMI_META_DATA		0x10
#define RMB_PMI_CODE_START		0x14
#define RMB_PMI_CODE_LENGTH		0x18
#define RMB_PROTOCOL_VERSION		0x1C
#define RMB_MBA_DEBUG_INFORMATION	0x20

#define POLL_INTERVAL_US		50

#define CMD_META_DATA_READY		0x1
#define CMD_LOAD_READY			0x2
#define CMD_PILFAIL_NFY_MBA		0xffffdead

#define STATUS_META_DATA_AUTH_SUCCESS	0x3
#define STATUS_AUTH_COMPLETE		0x4
#define STATUS_MBA_UNLOCKED		0x6

/* External BHS */
#define EXTERNAL_BHS_ON			BIT(0)
#define EXTERNAL_BHS_STATUS		BIT(4)
#define BHS_TIMEOUT_US			50

#define MSS_RESTART_PARAM_ID		0x2
#define MSS_RESTART_ID			0xA

#define MSS_MAGIC			0xAABADEAD

/* Timeout value for MBA boot when minidump is enabled */
#define MBA_ENCRYPTION_TIMEOUT		3000

enum scm_cmd {
	PAS_MEM_SETUP_CMD = 2,
};

static int pbl_mba_boot_timeout_ms = 1000;
module_param(pbl_mba_boot_timeout_ms, int, 0644);

static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, 0644);

/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
static uint modem_trigger_panic;
module_param(modem_trigger_panic, uint, 0644);

/* To set the modem debug cookie in DBG_CFG register for debugging */
static uint modem_dbg_cfg;
module_param(modem_dbg_cfg, uint, 0644);

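/*
 * Dump the PBL/MBA interface registers for postmortem debugging, and panic
 * for ramdump collection if the modem_trigger_panic cookie is set.
 */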
static void modem_log_rmb_regs(void __iomem *base)
{
	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
	pr_err("RMB_MBA_COMMAND: %08x\n",
		readl_relaxed(base + RMB_MBA_COMMAND));
	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
	pr_err("RMB_PMI_META_DATA: %08x\n",
		readl_relaxed(base + RMB_PMI_META_DATA));
	pr_err("RMB_PMI_CODE_START: %08x\n",
		readl_relaxed(base + RMB_PMI_CODE_START));
	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
		readl_relaxed(base + RMB_PMI_CODE_LENGTH));
	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
		readl_relaxed(base + RMB_PROTOCOL_VERSION));
	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
		readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));

	if (modem_trigger_panic == MSS_MAGIC)
		panic("%s: System ramdump is needed!!!\n", __func__);
}

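/* Turn on the external CX-rail block head switch (BHS), if one is present. */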
static int pil_mss_power_up(struct q6v5_data *drv)
{
	int ret = 0;
	u32 regval;

	if (drv->cxrail_bhs) {
		regval = readl_relaxed(drv->cxrail_bhs);
		regval |= EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);

		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
	}

	return ret;
}

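/* Turn off the external CX-rail BHS; the write is not polled for status. */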
static int pil_mss_power_down(struct q6v5_data *drv)
{
	u32 regval;

	if (drv->cxrail_bhs) {
		regval = readl_relaxed(drv->cxrail_bhs);
		regval &= ~EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);
	}

	return 0;
}

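/*
 * Enable the Q6/modem bus and ROM clocks. On failure, unwind only the
 * clocks that were successfully enabled.
 */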
static int pil_mss_enable_clks(struct q6v5_data *drv)
{
	int ret;

	ret = clk_prepare_enable(drv->ahb_clk);
	if (ret)
		goto err_ahb_clk;
	ret = clk_prepare_enable(drv->axi_clk);
	if (ret)
		goto err_axi_clk;
	ret = clk_prepare_enable(drv->rom_clk);
	if (ret)
		goto err_rom_clk;
	ret = clk_prepare_enable(drv->gpll0_mss_clk);
	if (ret)
		goto err_gpll0_mss_clk;
	ret = clk_prepare_enable(drv->snoc_axi_clk);
	if (ret)
		goto err_snoc_axi_clk;
	ret = clk_prepare_enable(drv->mnoc_axi_clk);
	if (ret)
		goto err_mnoc_axi_clk;
	return 0;
err_mnoc_axi_clk:
	clk_disable_unprepare(drv->snoc_axi_clk);
err_snoc_axi_clk:
	clk_disable_unprepare(drv->gpll0_mss_clk);
err_gpll0_mss_clk:
	clk_disable_unprepare(drv->rom_clk);
err_rom_clk:
	clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
	clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
	return ret;
}

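/*
 * Disable the clocks enabled in pil_mss_enable_clks(). The AHB clock is
 * left running while a separate AHB clock vote is held.
 */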
static void pil_mss_disable_clks(struct q6v5_data *drv)
{
	clk_disable_unprepare(drv->mnoc_axi_clk);
	clk_disable_unprepare(drv->snoc_axi_clk);
	clk_disable_unprepare(drv->gpll0_mss_clk);
	clk_disable_unprepare(drv->rom_clk);
	clk_disable_unprepare(drv->axi_clk);
	if (!drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
}

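/*
 * Set or clear this target's MSS bit in the PDC sync register; called on
 * either side of a subsystem reset.
 */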
static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
{
	u32 val = 0;
	u32 mss_pdc_mask = BIT(drv->mss_pdc_offset);

	if (drv->pdc_sync) {
		val = readl_relaxed(drv->pdc_sync);
		if (pdc_sync)
			val |= mss_pdc_mask;
		else
			val &= ~mss_pdc_mask;
		writel_relaxed(val, drv->pdc_sync);
		/* Ensure PDC is written before next write */
		wmb();
		udelay(2);
	}
}

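/* Drive the alternate reset line, if the target provides one. */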
static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
{
	if (drv->alt_reset) {
		writel_relaxed(val, drv->alt_reset);
		/* Ensure alt reset is written before restart reg */
		wmb();
		udelay(2);
	}
}

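/*
 * Assert or deassert the MSS restart register. When the register is
 * secure, the write is issued through an SCM call instead of MMIO.
 */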
static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
{
	int ret = 0;
	int scm_ret = 0;
	struct scm_desc desc = {0};

	desc.args[0] = mss_restart;
	desc.args[1] = 0;
	desc.arginfo = SCM_ARGS(2);

	if (drv->restart_reg && !drv->restart_reg_sec) {
		writel_relaxed(mss_restart, drv->restart_reg);
		mb();
		udelay(2);
	} else if (drv->restart_reg_sec) {
		if (!is_scm_armv8()) {
			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
					&mss_restart, sizeof(mss_restart),
					&scm_ret, sizeof(scm_ret));
		} else {
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
						MSS_RESTART_ID), &desc);
			scm_ret = desc.ret[0];
		}
		if (ret || scm_ret)
			pr_err("Secure MSS restart failed\n");
	}

	return ret;
}

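/* Assert all modem resets: PDC sync, alternate reset, then MSS restart. */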
int pil_mss_assert_resets(struct q6v5_data *drv)
{
	int ret = 0;

	pil_mss_pdc_sync(drv, 1);
	pil_mss_alt_reset(drv, 1);
	ret = pil_mss_restart_reg(drv, true);

	return ret;
}

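/* Release the modem resets in the reverse order of assertion. */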
int pil_mss_deassert_resets(struct q6v5_data *drv)
{
	int ret = 0;

	ret = pil_mss_restart_reg(drv, 0);
	if (ret)
		return ret;
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	pil_mss_alt_reset(drv, 0);
	pil_mss_pdc_sync(drv, false);

	return ret;
}

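/*
 * Poll the RMB status registers until the PBL and then the MBA report
 * completion, or until the boot timeout expires.
 */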
static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
{
	struct device *dev = drv->desc.dev;
	int ret;
	u32 status;
	u64 val;

	if (of_property_read_bool(dev->of_node, "qcom,minidump-id"))
		pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT;

	val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
		status != 0, POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
		return ret;
	}
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status != 0, POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
		return ret;
	}
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}

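/*
 * Halt the Q6, modem and non-cacheable AXI ports, cycle the subsystem
 * resets, and power the subsystem down if it had been booted.
 */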
int pil_mss_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;

	if (drv->axi_halt_base) {
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_Q6_HALT_BASE);
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_NC_HALT_BASE);
	}

	if (drv->axi_halt_q6)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
	if (drv->axi_halt_mss)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
	if (drv->axi_halt_nc)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);

	/*
	 * Software workaround to avoid high MX current during LPASS/MSS
	 * restart.
	 */
	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
		ret = clk_prepare_enable(drv->ahb_clk);
		if (!ret)
			assert_clamps(pil);
		else
			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
				ret);
	}

	pil_mss_assert_resets(drv);
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	ret = pil_mss_deassert_resets(drv);

	if (drv->is_booted) {
		pil_mss_disable_clks(drv);
		pil_mss_power_down(drv);
		drv->is_booted = false;
	}

	return ret;
}

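/*
 * Tear down the modem image. On the error path, first notify the MBA of
 * the PIL failure and wait for it to unlock the MBA region.
 */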
int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	s32 status;
	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;

	if (err_path) {
		writel_relaxed(CMD_PILFAIL_NFY_MBA,
			drv->rmb_base + RMB_MBA_COMMAND);
		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_MBA_UNLOCKED || status < 0,
			POLL_INTERVAL_US, val);
		if (ret)
			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
				ret);
		else if (status < 0)
			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
				status);
	}

	ret = pil_mss_shutdown(pil);

	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	/*
	 * In case of any failure where reclaiming MBA and DP memory
	 * could not happen, free the memory here.
	 */
	if (drv->q6->mba_dp_virt) {
		if (pil->subsys_vmid > 0)
			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
				drv->q6->mba_dp_size);
		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
			drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
			drv->attrs_dma);
		drv->q6->mba_dp_virt = NULL;
	}

	return ret;
}

int pil_mss_deinit_image(struct pil_desc *pil)
{
	return __pil_mss_deinit_image(pil, true);
}

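/*
 * Vote for the MX (and, when present, MSS) rails at the voltages given in
 * device tree, then make the common Q6v5 proxy votes.
 */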
int pil_mss_make_proxy_votes(struct pil_desc *pil)
{
	int ret;
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	int uv = 0;

	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
	if (ret) {
		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
		return ret;
	}

	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
	if (ret) {
		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
			ret);
		return ret;
	}

	ret = regulator_enable(drv->vreg_mx);
	if (ret) {
		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
		return ret;
	}

	if (drv->vreg) {
		ret = of_property_read_u32(pil->dev->of_node, "vdd_mss-uV",
			&uv);
		if (ret) {
			dev_err(pil->dev,
				"missing vdd_mss-uV property(rc:%d)\n", ret);
			goto out;
		}

		ret = regulator_set_voltage(drv->vreg, uv, INT_MAX);
		if (ret) {
			dev_err(pil->dev, "Failed to set vreg voltage(rc:%d)\n",
				ret);
			goto out;
		}

		ret = regulator_set_load(drv->vreg, 100000);
		if (ret < 0) {
			dev_err(pil->dev, "Failed to set vreg mode(rc:%d)\n",
				ret);
			goto out;
		}
		ret = regulator_enable(drv->vreg);
		if (ret) {
			dev_err(pil->dev, "Failed to enable vreg(rc:%d)\n",
				ret);
			regulator_set_voltage(drv->vreg, 0, INT_MAX);
			goto out;
		}
	}

	ret = pil_q6v5_make_proxy_votes(pil);
	if (ret && drv->vreg) {
		regulator_disable(drv->vreg);
		regulator_set_voltage(drv->vreg, 0, INT_MAX);
	}
out:
	if (ret) {
		regulator_disable(drv->vreg_mx);
		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
	}

	return ret;
}

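/* Drop the votes made in pil_mss_make_proxy_votes(). */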
void pil_mss_remove_proxy_votes(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	pil_q6v5_remove_proxy_votes(pil);
	regulator_disable(drv->vreg_mx);
	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
	if (drv->vreg) {
		regulator_disable(drv->vreg);
		regulator_set_voltage(drv->vreg, 0, INT_MAX);
	}
}

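/*
 * Describe the physical region the modem image will occupy to the secure
 * world via the PAS_MEM_SETUP_CMD SCM call.
 */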
static int pil_mss_mem_setup(struct pil_desc *pil,
		phys_addr_t addr, size_t size)
{
	struct modem_data *md = dev_get_drvdata(pil->dev);

	struct pas_init_image_req {
		u32	proc;
		u32	start_addr;
		u32	len;
	} request;
	u32 scm_ret = 0;
	int ret;
	struct scm_desc desc = {0};

	if (!md->subsys_desc.pil_mss_memsetup)
		return 0;

	request.proc = md->pas_id;
	request.start_addr = addr;
	request.len = size;

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
				sizeof(request), &scm_ret, sizeof(scm_ret));
	} else {
		desc.args[0] = md->pas_id;
		desc.args[1] = addr;
		desc.args[2] = size;
		desc.arginfo = SCM_ARGS(3);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
				&desc);
		scm_ret = desc.ret[0];
	}
	if (ret)
		return ret;
	return scm_ret;
}

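/*
 * Power up and reset the subsystem, program the MBA (or boot vector) and
 * debug-policy addresses, bring the Q6 out of reset, and wait for the MBA
 * to come up when self-authentication is in use.
 */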
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	u32 debug_val;
	int ret;

	trace_pil_func(__func__);
	if (drv->mba_dp_phys)
		start_addr = drv->mba_dp_phys;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Save state of modem debug register before full reset */
	debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);

	/* Assert reset to subsystem */
	pil_mss_assert_resets(drv);
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	ret = pil_mss_deassert_resets(drv);
	if (ret)
		goto err_restart;

	writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
	if (modem_dbg_cfg)
		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);

	/* Program Image Address */
	if (drv->self_auth) {
		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
		/*
		 * Ensure write to RMB base occurs before reset
		 * is released.
		 */
		mb();
	} else {
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	/* Program DP Address */
	if (drv->dp_size) {
		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
				RMB_PMI_CODE_START);
		writel_relaxed(drv->dp_size, drv->rmb_base +
				RMB_PMI_CODE_LENGTH);
	} else {
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	}
	/* Make sure RMB regs are written before bringing modem out of reset */
	mb();

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (drv->self_auth) {
		ret = pil_msa_wait_for_mba_ready(drv);
		if (ret)
			goto err_q6v5_reset;
	}

	dev_info(pil->dev, "MBA boot done\n");
	drv->is_booted = true;

	return 0;

err_q6v5_reset:
	modem_log_rmb_regs(drv->rmb_base);
err_restart:
	pil_mss_disable_clks(drv);
	if (drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
err_clks:
	pil_mss_power_down(drv);
err_power:
	return ret;
}

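/*
 * Allocate a DMA buffer, copy the MBA image (plus the optional "msadp"
 * debug policy) into it, assign it to the subsystem if required, and boot
 * the MBA via pil_mss_reset().
 */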
int pil_mss_reset_load_mba(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct modem_data *md = dev_get_drvdata(pil->dev);
	const struct firmware *fw, *dp_fw = NULL;
	char fw_name_legacy[10] = "mba.b00";
	char fw_name[10] = "mba.mbn";
	char *dp_name = "msadp";
	char *fw_name_p;
	void *mba_dp_virt;
	dma_addr_t mba_dp_phys, mba_dp_phys_end;
	int ret;
	const u8 *data;
	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;

	trace_pil_func(__func__);
	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
	ret = request_firmware(&fw, fw_name_p, pil->dev);
	if (ret) {
		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
			fw_name_p, ret);
		return ret;
	}

	data = fw ? fw->data : NULL;
	if (!data) {
		dev_err(pil->dev, "MBA data is NULL\n");
		ret = -ENOMEM;
		goto err_invalid_fw;
	}

	drv->mba_dp_size = SZ_1M;

	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);

	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);

	md->attrs_dma = 0;
	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;

	ret = request_firmware(&dp_fw, dp_name, pil->dev);
	if (ret) {
		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
			dp_name);
	} else {
		if (!dp_fw || !dp_fw->data) {
			dev_err(pil->dev, "Invalid DP firmware\n");
			ret = -ENOMEM;
			goto err_invalid_fw;
		}
		drv->dp_size = dp_fw->size;
		drv->mba_dp_size += drv->dp_size;
		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
	}

	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
			GFP_KERNEL, md->attrs_dma);
	if (!mba_dp_virt) {
		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
			__func__, drv->mba_dp_size);
		ret = -ENOMEM;
		goto err_invalid_fw;
	}

	/* Make sure there are no mappings in PKMAP and fixmap */
	kmap_flush_unused();
	kmap_atomic_flush_unused();

	drv->mba_dp_phys = mba_dp_phys;
	drv->mba_dp_virt = mba_dp_virt;
	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;

	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
		&mba_dp_phys, &mba_dp_phys_end);

	/* Load the MBA image into memory */
	if (fw->size <= SZ_1M) {
		/* Ensures memcpy is done for max 1MB fw size */
		memcpy(mba_dp_virt, data, fw->size);
	} else {
		dev_err(pil->dev, "%s fw image loading into memory failed due to fw size overflow\n",
			__func__);
		ret = -EINVAL;
		goto err_mba_data;
	}
	/* Ensure memcpy of the MBA memory is done before loading the DP */
	wmb();

	/* Load the DP image into memory */
	if (drv->mba_dp_size > SZ_1M) {
		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
		/* Ensure memcpy is done before powering up modem */
		wmb();
	}

	if (pil->subsys_vmid > 0) {
		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
				drv->mba_dp_size);
		if (ret) {
			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
				ret);
			goto err_mba_data;
		}
	}

	ret = pil_mss_reset(pil);
	if (ret) {
		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
		goto err_mss_reset;
	}

	if (dp_fw)
		release_firmware(dp_fw);
	release_firmware(fw);

	return 0;

err_mss_reset:
	if (pil->subsys_vmid > 0)
		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
				drv->mba_dp_size);
err_mba_data:
	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
			drv->mba_dp_phys, md->attrs_dma);
err_invalid_fw:
	if (dp_fw)
		release_firmware(dp_fw);
	release_firmware(fw);
	drv->mba_dp_virt = NULL;
	return ret;
}

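/*
 * Copy the modem metadata into a DMA buffer, hand its address to the MBA,
 * and poll for the result of header authentication.
 */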
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
		size_t size, phys_addr_t region_start, void *region)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	unsigned long attrs = 0;

	trace_pil_func(__func__);
	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	attrs |= DMA_ATTR_SKIP_ZEROING;
	attrs |= DMA_ATTR_STRONGLY_ORDERED;
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
			GFP_KERNEL, attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	if (pil->subsys_vmid > 0) {
		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
				ALIGN(size, SZ_4K));
		if (ret) {
			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
				ret);
			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
					attrs);
			goto fail;
		}
	}

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
			ret);
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
			status);
		ret = -EINVAL;
	}

	if (pil->subsys_vmid > 0)
		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));

	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		if (pil->subsys_vmid > 0)
			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
				drv->q6->mba_dp_size);
		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
			drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
			drv->attrs_dma);
		drv->q6->mba_dp_virt = NULL;
	}
	return ret;
}

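/*
 * init_image op for self-authenticating targets: boot the MBA first, then
 * authenticate the modem metadata.
 */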
static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
		const u8 *metadata, size_t size,
		phys_addr_t region_start, void *region)
{
	int ret;

	ret = pil_mss_reset_load_mba(pil);
	if (ret)
		return ret;

	return pil_msa_auth_modem_mdt(pil, metadata, size, region_start,
			region);
}

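/*
 * Report each newly loaded segment to the MBA by advancing the code length
 * counter; the first blob also kicks off image authentication.
 */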
static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
		size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	s32 status;
	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Begin image authentication */
	if (img_length == 0) {
		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
	}
	/* Increment length counter */
	img_length += size;
	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
	if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d\n", status);
		modem_log_rmb_regs(drv->rmb_base);
		return -EINVAL;
	}

	return 0;
}

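/*
 * Wait for the MBA to finish authenticating all modem segments, then
 * reclaim the MBA/DP buffer and drop the AHB clock vote.
 */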
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret;
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	s32 status;
	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
			ret);
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6) {
		if (drv->q6->mba_dp_virt) {
			/* Reclaim MBA and DP (if allocated) memory. */
			if (pil->subsys_vmid > 0)
				pil_assign_mem_to_linux(pil,
					drv->q6->mba_dp_phys,
					drv->q6->mba_dp_size);
			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);

			drv->q6->mba_dp_virt = NULL;
		}
	}
	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	return ret;
}

/*
 * To be used only if self-auth is disabled, or if the
 * MBA image is loaded as segments and not in init_image.
 */
struct pil_reset_ops pil_msa_mss_ops = {
	.proxy_vote = pil_mss_make_proxy_votes,
	.proxy_unvote = pil_mss_remove_proxy_votes,
	.auth_and_reset = pil_mss_reset,
	.shutdown = pil_mss_shutdown,
};

/*
 * To be used if self-auth is enabled and the MBA is to be loaded
 * in init_image and the modem headers are also to be authenticated
 * in init_image. Modem segments authenticated in auth_and_reset.
 */
struct pil_reset_ops pil_msa_mss_ops_selfauth = {
	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
	.proxy_vote = pil_mss_make_proxy_votes,
	.proxy_unvote = pil_mss_remove_proxy_votes,
	.mem_setup = pil_mss_mem_setup,
	.verify_blob = pil_msa_mba_verify_blob,
	.auth_and_reset = pil_msa_mba_auth,
	.deinit_image = pil_mss_deinit_image,
	.shutdown = pil_mss_shutdown,
};

/*
 * To be used if the modem headers are to be authenticated
 * in init_image, and the modem segments in auth_and_reset.
 */
struct pil_reset_ops pil_msa_femto_mba_ops = {
	.init_image = pil_msa_auth_modem_mdt,
	.verify_blob = pil_msa_mba_verify_blob,
	.auth_and_reset = pil_msa_mba_auth,
};
971};