blob: ec3063e19d25882ce3a1d98718903266ce30807b [file] [log] [blame]
Puja Gupta6caaa232016-05-04 12:03:31 -07001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Kyle Yane45fa022016-08-29 11:40:26 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/firmware.h>
16#include <linux/io.h>
17#include <linux/iopoll.h>
18#include <linux/ioport.h>
19#include <linux/delay.h>
20#include <linux/sched.h>
21#include <linux/clk.h>
22#include <linux/err.h>
23#include <linux/of.h>
24#include <linux/regulator/consumer.h>
25#include <linux/dma-mapping.h>
26#include <linux/highmem.h>
27#include <soc/qcom/scm.h>
28#include <soc/qcom/secure_buffer.h>
Gaurav Kohli2da45012017-05-08 15:21:43 +053029#include <trace/events/trace_msm_pil_event.h>
Kyle Yane45fa022016-08-29 11:40:26 -070030
31#include "peripheral-loader.h"
32#include "pil-q6v5.h"
33#include "pil-msa.h"
34
/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB			0x010
#define QDSP6SS_DBG_CFG			0x018

/* AXI Halting Registers (offsets into the combined axi_halt_base block) */
#define MSS_Q6_HALT_BASE		0x180
#define MSS_MODEM_HALT_BASE		0x200
#define MSS_NC_HALT_BASE		0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS		0x1
#define STATUS_XPU_UNLOCKED		0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2

/* PBL/MBA interface registers (offsets into rmb_base) */
#define RMB_MBA_IMAGE			0x00
#define RMB_PBL_STATUS			0x04
#define RMB_MBA_COMMAND			0x08
#define RMB_MBA_STATUS			0x0C
#define RMB_PMI_META_DATA		0x10
#define RMB_PMI_CODE_START		0x14
#define RMB_PMI_CODE_LENGTH		0x18
#define RMB_PROTOCOL_VERSION		0x1C
#define RMB_MBA_DEBUG_INFORMATION	0x20

/* Polling period (us) for the readl_poll_timeout() loops below */
#define POLL_INTERVAL_US		50

/* Commands written to RMB_MBA_COMMAND */
#define CMD_META_DATA_READY		0x1
#define CMD_LOAD_READY			0x2
#define CMD_PILFAIL_NFY_MBA		0xffffdead

/* Status values read back from RMB_MBA_STATUS */
#define STATUS_META_DATA_AUTH_SUCCESS	0x3
#define STATUS_AUTH_COMPLETE		0x4
#define STATUS_MBA_UNLOCKED		0x6

/* External BHS (block head switch) control/status bits */
#define EXTERNAL_BHS_ON			BIT(0)
#define EXTERNAL_BHS_STATUS		BIT(4)
#define BHS_TIMEOUT_US			50

/* Secure (SCM) MSS restart call identifiers */
#define MSS_RESTART_PARAM_ID		0x2
#define MSS_RESTART_ID			0xA

/* Magic modem_trigger_panic value that turns MBA failures into a panic */
#define MSS_MAGIC			0XAABADEAD

/* Timeout value for MBA boot when minidump is enabled */
#define MBA_ENCRYPTION_TIMEOUT	3000

/* Command id under SCM_SVC_PIL */
enum scm_cmd {
	PAS_MEM_SETUP_CMD = 2,
};

/* PBL/MBA boot poll timeout in ms (polling disabled case handled via
 * is_timeout_disabled())
 */
static int pbl_mba_boot_timeout_ms = 1000;
module_param(pbl_mba_boot_timeout_ms, int, 0644);

/* Modem image authentication poll timeout in ms */
static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, 0644);

/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
static uint modem_trigger_panic;
module_param(modem_trigger_panic, uint, 0644);

/* To set the modem debug cookie in DBG_CFG register for debugging */
static uint modem_dbg_cfg;
module_param(modem_dbg_cfg, uint, 0644);
99
/*
 * modem_log_rmb_regs() - Dump the PBL/MBA RMB interface registers.
 * @base: ioremapped base of the RMB register block.
 *
 * Called on boot/authentication failures so the RMB state lands in the
 * kernel log. If the modem_trigger_panic module parameter holds the
 * MSS_MAGIC cookie, panic so a full system ramdump is collected.
 */
static void modem_log_rmb_regs(void __iomem *base)
{
	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
	pr_err("RMB_MBA_COMMAND: %08x\n",
		readl_relaxed(base + RMB_MBA_COMMAND));
	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
	pr_err("RMB_PMI_META_DATA: %08x\n",
		readl_relaxed(base + RMB_PMI_META_DATA));
	pr_err("RMB_PMI_CODE_START: %08x\n",
		readl_relaxed(base + RMB_PMI_CODE_START));
	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
		readl_relaxed(base + RMB_PMI_CODE_LENGTH));
	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
		readl_relaxed(base + RMB_PROTOCOL_VERSION));
	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
		readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));

	if (modem_trigger_panic == MSS_MAGIC)
		panic("%s: System ramdump is needed!!!\n", __func__);
}
121
122static int pil_mss_power_up(struct q6v5_data *drv)
123{
124 int ret = 0;
125 u32 regval;
126
127 if (drv->vreg) {
128 ret = regulator_enable(drv->vreg);
129 if (ret)
130 dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
131 ret);
132 }
133
134 if (drv->cxrail_bhs) {
135 regval = readl_relaxed(drv->cxrail_bhs);
136 regval |= EXTERNAL_BHS_ON;
137 writel_relaxed(regval, drv->cxrail_bhs);
138
139 ret = readl_poll_timeout(drv->cxrail_bhs, regval,
140 regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
141 }
142
143 return ret;
144}
145
146static int pil_mss_power_down(struct q6v5_data *drv)
147{
148 u32 regval;
149
150 if (drv->cxrail_bhs) {
151 regval = readl_relaxed(drv->cxrail_bhs);
152 regval &= ~EXTERNAL_BHS_ON;
153 writel_relaxed(regval, drv->cxrail_bhs);
154 }
155
156 if (drv->vreg)
157 return regulator_disable(drv->vreg);
158
159 return 0;
160}
161
162static int pil_mss_enable_clks(struct q6v5_data *drv)
163{
164 int ret;
165
166 ret = clk_prepare_enable(drv->ahb_clk);
167 if (ret)
168 goto err_ahb_clk;
169 ret = clk_prepare_enable(drv->axi_clk);
170 if (ret)
171 goto err_axi_clk;
172 ret = clk_prepare_enable(drv->rom_clk);
173 if (ret)
174 goto err_rom_clk;
175 ret = clk_prepare_enable(drv->gpll0_mss_clk);
176 if (ret)
177 goto err_gpll0_mss_clk;
178 ret = clk_prepare_enable(drv->snoc_axi_clk);
179 if (ret)
180 goto err_snoc_axi_clk;
181 ret = clk_prepare_enable(drv->mnoc_axi_clk);
182 if (ret)
183 goto err_mnoc_axi_clk;
184 return 0;
185err_mnoc_axi_clk:
186 clk_disable_unprepare(drv->mnoc_axi_clk);
187err_snoc_axi_clk:
188 clk_disable_unprepare(drv->snoc_axi_clk);
189err_gpll0_mss_clk:
190 clk_disable_unprepare(drv->gpll0_mss_clk);
191err_rom_clk:
192 clk_disable_unprepare(drv->rom_clk);
193err_axi_clk:
194 clk_disable_unprepare(drv->axi_clk);
195err_ahb_clk:
196 clk_disable_unprepare(drv->ahb_clk);
197 return ret;
198}
199
/*
 * pil_mss_disable_clks() - Disable the MSS clocks in the reverse order of
 * pil_mss_enable_clks().
 *
 * The AHB clock is left running when a separate AHB clock vote is held
 * (ahb_clk_vote); the vote holder is responsible for dropping it.
 */
static void pil_mss_disable_clks(struct q6v5_data *drv)
{
	clk_disable_unprepare(drv->mnoc_axi_clk);
	clk_disable_unprepare(drv->snoc_axi_clk);
	clk_disable_unprepare(drv->gpll0_mss_clk);
	clk_disable_unprepare(drv->rom_clk);
	clk_disable_unprepare(drv->axi_clk);
	if (!drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
}
210
Kyle Yan0e660cb2017-05-01 11:13:14 -0700211static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
212{
213 u32 val = 0;
Kyle Yan506c07f2017-09-14 11:20:59 -0700214 u32 mss_pdc_mask = BIT(drv->mss_pdc_offset);
Kyle Yan0e660cb2017-05-01 11:13:14 -0700215
216 if (drv->pdc_sync) {
217 val = readl_relaxed(drv->pdc_sync);
218 if (pdc_sync)
Kyle Yan506c07f2017-09-14 11:20:59 -0700219 val |= mss_pdc_mask;
Kyle Yan0e660cb2017-05-01 11:13:14 -0700220 else
Kyle Yan506c07f2017-09-14 11:20:59 -0700221 val &= ~mss_pdc_mask;
Kyle Yan0e660cb2017-05-01 11:13:14 -0700222 writel_relaxed(val, drv->pdc_sync);
223 /* Ensure PDC is written before next write */
224 wmb();
225 udelay(2);
226 }
227}
228
Kyle Yan05f6e102017-04-25 18:30:04 -0700229static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
230{
231 if (drv->alt_reset) {
232 writel_relaxed(val, drv->alt_reset);
233 /* Ensure alt reset is written before restart reg */
234 wmb();
235 udelay(2);
236 }
237}
238
Kyle Yane45fa022016-08-29 11:40:26 -0700239static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
240{
241 int ret = 0;
242 int scm_ret = 0;
243 struct scm_desc desc = {0};
244
245 desc.args[0] = mss_restart;
246 desc.args[1] = 0;
247 desc.arginfo = SCM_ARGS(2);
248
249 if (drv->restart_reg && !drv->restart_reg_sec) {
250 writel_relaxed(mss_restart, drv->restart_reg);
251 mb();
252 udelay(2);
253 } else if (drv->restart_reg_sec) {
254 if (!is_scm_armv8()) {
255 ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
256 &mss_restart, sizeof(mss_restart),
257 &scm_ret, sizeof(scm_ret));
258 } else {
259 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
260 MSS_RESTART_ID), &desc);
261 scm_ret = desc.ret[0];
262 }
263 if (ret || scm_ret)
264 pr_err("Secure MSS restart failed\n");
265 }
266
267 return ret;
268}
269
Kyle Yanca0da632017-09-11 14:54:11 -0700270int pil_mss_assert_resets(struct q6v5_data *drv)
Kyle Yan05f6e102017-04-25 18:30:04 -0700271{
272 int ret = 0;
273
274 pil_mss_pdc_sync(drv, 1);
275 pil_mss_alt_reset(drv, 1);
276 ret = pil_mss_restart_reg(drv, true);
277
278 return ret;
279}
280
Kyle Yanca0da632017-09-11 14:54:11 -0700281int pil_mss_deassert_resets(struct q6v5_data *drv)
Kyle Yan05f6e102017-04-25 18:30:04 -0700282{
283 int ret = 0;
284
285 ret = pil_mss_restart_reg(drv, 0);
286 if (ret)
287 return ret;
288 /* Wait 6 32kHz sleep cycles for reset */
289 udelay(200);
290 pil_mss_alt_reset(drv, 0);
291 pil_mss_pdc_sync(drv, false);
292
293 return ret;
294}
295
Kyle Yane45fa022016-08-29 11:40:26 -0700296static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
297{
298 struct device *dev = drv->desc.dev;
299 int ret;
300 u32 status;
Avaneesh Kumar Dwivediec6d1392017-07-06 21:18:03 +0530301 u64 val;
302
303 if (of_property_read_bool(dev->of_node, "qcom,minidump-id"))
304 pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT;
305
306 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
Kyle Yane45fa022016-08-29 11:40:26 -0700307
308 /* Wait for PBL completion. */
309 ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
310 status != 0, POLL_INTERVAL_US, val);
311 if (ret) {
312 dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
313 return ret;
314 }
315 if (status != STATUS_PBL_SUCCESS) {
316 dev_err(dev, "PBL returned unexpected status %d\n", status);
317 return -EINVAL;
318 }
319
320 /* Wait for MBA completion. */
321 ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
322 status != 0, POLL_INTERVAL_US, val);
323 if (ret) {
324 dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
325 return ret;
326 }
327 if (status != STATUS_XPU_UNLOCKED &&
328 status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
329 dev_err(dev, "MBA returned unexpected status %d\n", status);
330 return -EINVAL;
331 }
332
333 return 0;
334}
335
/*
 * pil_mss_shutdown() - Halt the MSS bus ports and fully reset the
 * subsystem, then (if it was booted) drop clocks and power.
 *
 * Halting uses either the combined axi_halt_base region with fixed
 * per-port offsets, or individually mapped halt registers — whichever
 * the target's DT provided.
 *
 * Returns the pil_mss_deassert_resets() result.
 */
int pil_mss_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;

	/* Combined halt region: Q6, modem, and NC ports at fixed offsets. */
	if (drv->axi_halt_base) {
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_Q6_HALT_BASE);
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_NC_HALT_BASE);
	}

	/* Individually mapped halt registers (alternative DT layout). */
	if (drv->axi_halt_q6)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
	if (drv->axi_halt_mss)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
	if (drv->axi_halt_nc)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);

	/*
	 * Software workaround to avoid high MX current during LPASS/MSS
	 * restart.
	 */
	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
		ret = clk_prepare_enable(drv->ahb_clk);
		if (!ret)
			assert_clamps(pil);
		else
			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
				ret);
	}

	pil_mss_assert_resets(drv);
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	ret = pil_mss_deassert_resets(drv);

	if (drv->is_booted) {
		pil_mss_disable_clks(drv);
		pil_mss_power_down(drv);
		drv->is_booted = false;
	}

	return ret;
}
383
/*
 * __pil_mss_deinit_image() - Common deinit path: optionally notify the
 * MBA of a PIL failure, shut the subsystem down, and reclaim the MBA/DP
 * DMA buffer if it is still allocated.
 * @err_path: when true, write CMD_PILFAIL_NFY_MBA and poll until the MBA
 *	      unlocks its memory region (or reports an error) before the
 *	      shutdown.
 *
 * Returns the pil_mss_shutdown() result.
 */
int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;
	/* Fixed MBA memory device (from DT) takes precedence when present. */
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	s32 status;
	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;

	if (err_path) {
		writel_relaxed(CMD_PILFAIL_NFY_MBA,
				drv->rmb_base + RMB_MBA_COMMAND);
		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_MBA_UNLOCKED || status < 0,
			POLL_INTERVAL_US, val);
		if (ret)
			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
				ret);
		else if (status < 0)
			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
				status);
	}

	ret = pil_mss_shutdown(pil);

	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	/* In case of any failure where reclaiming MBA and DP memory
	 * could not happen, free the memory here
	 */
	if (drv->q6->mba_dp_virt) {
		/* Give the region back to Linux before freeing, if secured. */
		if (pil->subsys_vmid > 0)
			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
						drv->q6->mba_dp_size);
		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);
		drv->q6->mba_dp_virt = NULL;
	}

	return ret;
}
427
/*
 * pil_mss_deinit_image() - PIL deinit_image hook; always takes the
 * error-notification path so the MBA unlocks its region first.
 */
int pil_mss_deinit_image(struct pil_desc *pil)
{
	return __pil_mss_deinit_image(pil, true);
}
432
433int pil_mss_make_proxy_votes(struct pil_desc *pil)
434{
435 int ret;
436 struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
437 int uv = 0;
438
439 ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
440 if (ret) {
441 dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
442 return ret;
443 }
444
445 ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
446 if (ret) {
447 dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
448 ret);
449 return ret;
450 }
451
452 ret = regulator_enable(drv->vreg_mx);
453 if (ret) {
454 dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
455 regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
456 return ret;
457 }
458
459 ret = pil_q6v5_make_proxy_votes(pil);
460 if (ret) {
461 regulator_disable(drv->vreg_mx);
462 regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
463 }
464
465 return ret;
466}
467
/*
 * pil_mss_remove_proxy_votes() - Drop the votes taken in
 * pil_mss_make_proxy_votes(): q6v5 proxy resources first, then disable
 * the MX rail and release its voltage request.
 */
void pil_mss_remove_proxy_votes(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	pil_q6v5_remove_proxy_votes(pil);
	regulator_disable(drv->vreg_mx);
	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
}
476
/*
 * pil_mss_mem_setup() - Tell the secure world (SCM PAS_MEM_SETUP_CMD)
 * which physical region the modem image occupies.
 *
 * No-op (returns 0) when the subsystem descriptor does not request it.
 * Returns the SCM transport error, or the command's own status word.
 */
static int pil_mss_mem_setup(struct pil_desc *pil,
		phys_addr_t addr, size_t size)
{
	struct modem_data *md = dev_get_drvdata(pil->dev);

	/* Request layout for the legacy (pre-ARMv8 SCM) call. */
	struct pas_init_image_req {
		u32 proc;
		u32 start_addr;
		u32 len;
	} request;
	u32 scm_ret = 0;
	int ret;
	struct scm_desc desc = {0};

	if (!md->subsys_desc.pil_mss_memsetup)
		return 0;

	request.proc = md->pas_id;
	request.start_addr = addr;
	request.len = size;

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
				sizeof(request), &scm_ret, sizeof(scm_ret));
	} else {
		desc.args[0] = md->pas_id;
		desc.args[1] = addr;
		desc.args[2] = size;
		desc.arginfo = SCM_ARGS(3);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
				&desc);
		scm_ret = desc.ret[0];
	}
	if (ret)
		return ret;
	return scm_ret;
}
514
/*
 * pil_mss_reset() - Power, clock, and release the Q6/modem out of reset,
 * then (for self-authenticating targets) wait for the MBA to come up.
 *
 * Sequence: power rails -> clocks -> save debug cfg -> assert/deassert
 * full reset -> restore debug cfg -> program boot address (RMB image
 * register for self-auth, RST_EVB otherwise) and debug-policy window ->
 * release Q6 -> poll PBL/MBA status.
 *
 * Returns 0 on success; unwinds clocks and power on failure.
 */
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	u32 debug_val;
	int ret;

	trace_pil_func(__func__);
	/* Boot from the MBA/DP buffer when one was allocated. */
	if (drv->mba_dp_phys)
		start_addr = drv->mba_dp_phys;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Save state of modem debug register before full reset */
	debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);

	/* Assert reset to subsystem */
	pil_mss_assert_resets(drv);
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	ret = pil_mss_deassert_resets(drv);
	if (ret)
		goto err_restart;

	/* Restore the saved debug cookie; module param overrides it. */
	writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
	if (modem_dbg_cfg)
		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);

	/* Program Image Address */
	if (drv->self_auth) {
		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
		/*
		 * Ensure write to RMB base occurs before reset
		 * is released.
		 */
		mb();
	} else {
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	/* Program DP Address (DP image sits 1MB above the MBA). */
	if (drv->dp_size) {
		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
				RMB_PMI_CODE_START);
		writel_relaxed(drv->dp_size, drv->rmb_base +
				RMB_PMI_CODE_LENGTH);
	} else {
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	}
	/* Make sure RMB regs are written before bringing modem out of reset */
	mb();

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (drv->self_auth) {
		ret = pil_msa_wait_for_mba_ready(drv);
		if (ret)
			goto err_q6v5_reset;
	}

	dev_info(pil->dev, "MBA boot done\n");
	drv->is_booted = true;

	return 0;

err_q6v5_reset:
	modem_log_rmb_regs(drv->rmb_base);
err_restart:
	pil_mss_disable_clks(drv);
	if (drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
err_clks:
	pil_mss_power_down(drv);
err_power:
	return ret;
}
606
/*
 * pil_mss_reset_load_mba() - Load the MBA firmware (and the optional
 * "msadp" debug-policy image) into a DMA buffer, assign it to the
 * subsystem's VM if required, and boot the MBA via pil_mss_reset().
 *
 * Buffer layout: MBA at offset 0 (at most 1MB), DP image (if present)
 * at offset SZ_1M; total size is 4K-aligned.
 *
 * Returns 0 on success; on failure the firmware handles are released,
 * the memory is returned to Linux (if it had been assigned away) and
 * the buffer freed.
 */
int pil_mss_reset_load_mba(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct modem_data *md = dev_get_drvdata(pil->dev);
	const struct firmware *fw, *dp_fw = NULL;
	char fw_name_legacy[10] = "mba.b00";
	char fw_name[10] = "mba.mbn";
	char *dp_name = "msadp";
	char *fw_name_p;
	void *mba_dp_virt;
	dma_addr_t mba_dp_phys, mba_dp_phys_end;
	int ret;
	const u8 *data;
	/* Fixed MBA memory device (from DT) takes precedence when present. */
	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;

	trace_pil_func(__func__);
	/* Legacy targets ship the MBA as a non-ELF "mba.b00" blob. */
	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
	ret = request_firmware(&fw, fw_name_p, pil->dev);
	if (ret) {
		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
			fw_name_p, ret);
		return ret;
	}

	data = fw ? fw->data : NULL;
	if (!data) {
		dev_err(pil->dev, "MBA data is NULL\n");
		ret = -ENOMEM;
		goto err_invalid_fw;
	}

	/* Reserve 1MB for the MBA; DP (if any) is appended below. */
	drv->mba_dp_size = SZ_1M;

	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);

	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);

	md->attrs_dma = 0;
	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;

	/* The debug policy is optional; continue without it on failure. */
	ret = request_firmware(&dp_fw, dp_name, pil->dev);
	if (ret) {
		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
			dp_name);
	} else {
		if (!dp_fw || !dp_fw->data) {
			dev_err(pil->dev, "Invalid DP firmware\n");
			ret = -ENOMEM;
			goto err_invalid_fw;
		}
		drv->dp_size = dp_fw->size;
		drv->mba_dp_size += drv->dp_size;
		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
	}

	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
			GFP_KERNEL, md->attrs_dma);
	if (!mba_dp_virt) {
		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
			__func__, drv->mba_dp_size);
		ret = -ENOMEM;
		goto err_invalid_fw;
	}

	/* Make sure there are no mappings in PKMAP and fixmap */
	kmap_flush_unused();
	kmap_atomic_flush_unused();

	drv->mba_dp_phys = mba_dp_phys;
	drv->mba_dp_virt = mba_dp_virt;
	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;

	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
		&mba_dp_phys, &mba_dp_phys_end);

	/* Load the MBA image into memory */
	if (fw->size <= SZ_1M) {
		/* Ensures memcpy is done for max 1MB fw size */
		memcpy(mba_dp_virt, data, fw->size);
	} else {
		dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
			__func__);
		ret = -EINVAL;
		goto err_mba_data;
	}
	/* Ensure memcpy of the MBA memory is done before loading the DP */
	wmb();

	/* Load the DP image into memory */
	if (drv->mba_dp_size > SZ_1M) {
		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
		/* Ensure memcpy is done before powering up modem */
		wmb();
	}

	/* Assign the buffer to the subsystem VM before booting, if secured. */
	if (pil->subsys_vmid > 0) {
		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
				drv->mba_dp_size);
		if (ret) {
			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
				ret);
			goto err_mba_data;
		}
	}

	ret = pil_mss_reset(pil);
	if (ret) {
		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
		goto err_mss_reset;
	}

	if (dp_fw)
		release_firmware(dp_fw);
	release_firmware(fw);

	return 0;

err_mss_reset:
	if (pil->subsys_vmid > 0)
		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
				drv->mba_dp_size);
err_mba_data:
	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
			drv->mba_dp_phys, md->attrs_dma);
err_invalid_fw:
	if (dp_fw)
		release_firmware(dp_fw);
	release_firmware(fw);
	drv->mba_dp_virt = NULL;
	return ret;
}
739
/*
 * pil_msa_auth_modem_mdt() - Hand the modem image metadata (mdt headers)
 * to the MBA for authentication.
 * @metadata: header blob to authenticate.
 * @size: blob size in bytes.
 *
 * Copies the metadata into a physically contiguous DMA buffer, assigns
 * it to the subsystem VM if required, points RMB_PMI_META_DATA at it,
 * issues CMD_META_DATA_READY and polls for the auth result. On failure
 * the RMB registers are dumped and the whole subsystem (including the
 * MBA/DP buffer) is torn down.
 *
 * Returns 0 on success, a poll error on timeout, or -EINVAL when the
 * MBA reports an authentication error.
 */
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
	/* Fixed MBA memory device (from DT) takes precedence when present. */
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	unsigned long attrs = 0;

	trace_pil_func(__func__);
	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	attrs |= DMA_ATTR_SKIP_ZEROING;
	attrs |= DMA_ATTR_STRONGLY_ORDERED;
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
					GFP_KERNEL, attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	if (pil->subsys_vmid > 0) {
		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
						ALIGN(size, SZ_4K));
		if (ret) {
			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
				ret);
			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
					attrs);
			goto fail;
		}
	}

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
			ret);
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
			status);
		ret = -EINVAL;
	}

	/* Metadata is only needed during authentication; reclaim and free. */
	if (pil->subsys_vmid > 0)
		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));

	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		if (pil->subsys_vmid > 0)
			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
						drv->q6->mba_dp_size);
		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);
		drv->q6->mba_dp_virt = NULL;

	}
	return ret;
}
821
822static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
823 const u8 *metadata, size_t size)
824{
825 int ret;
826
827 ret = pil_mss_reset_load_mba(pil);
828 if (ret)
829 return ret;
830
831 return pil_msa_auth_modem_mdt(pil, metadata, size);
832}
833
/*
 * pil_msa_mba_verify_blob() - verify_blob hook: feed one modem image
 * segment to the MBA for streaming authentication.
 *
 * The first call (length counter still 0) programs the segment start
 * address and issues CMD_LOAD_READY; every call extends
 * RMB_PMI_CODE_LENGTH by the segment size. A negative value in
 * RMB_MBA_STATUS at any point indicates an authentication error.
 */
static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	s32 status;
	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Begin image authentication */
	if (img_length == 0) {
		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
	}
	/* Increment length counter */
	img_length += size;
	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
	if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d\n", status);
		modem_log_rmb_regs(drv->rmb_base);
		return -EINVAL;
	}

	return 0;
}
859
/*
 * pil_msa_mba_auth() - auth_and_reset hook: wait for the MBA to finish
 * authenticating all previously streamed segments, then reclaim and
 * free the MBA/DP buffer and drop the AHB clock vote.
 *
 * Returns 0 on success, a poll error on timeout, or -EINVAL when the
 * MBA reports an authentication error (RMB registers are dumped then).
 */
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret;
	/* Fixed MBA memory device (from DT) takes precedence when present. */
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	s32 status;
	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
			ret);
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6) {
		if (drv->q6->mba_dp_virt) {
			/* Reclaim MBA and DP (if allocated) memory. */
			if (pil->subsys_vmid > 0)
				pil_assign_mem_to_linux(pil,
					drv->q6->mba_dp_phys,
					drv->q6->mba_dp_size);
			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);

			drv->q6->mba_dp_virt = NULL;
		}
	}
	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	return ret;
}
901
902/*
903 * To be used only if self-auth is disabled, or if the
904 * MBA image is loaded as segments and not in init_image.
905 */
906struct pil_reset_ops pil_msa_mss_ops = {
907 .proxy_vote = pil_mss_make_proxy_votes,
908 .proxy_unvote = pil_mss_remove_proxy_votes,
909 .auth_and_reset = pil_mss_reset,
910 .shutdown = pil_mss_shutdown,
911};
912
913/*
914 * To be used if self-auth is enabled and the MBA is to be loaded
915 * in init_image and the modem headers are also to be authenticated
916 * in init_image. Modem segments authenticated in auth_and_reset.
917 */
918struct pil_reset_ops pil_msa_mss_ops_selfauth = {
919 .init_image = pil_msa_mss_reset_mba_load_auth_mdt,
920 .proxy_vote = pil_mss_make_proxy_votes,
921 .proxy_unvote = pil_mss_remove_proxy_votes,
922 .mem_setup = pil_mss_mem_setup,
923 .verify_blob = pil_msa_mba_verify_blob,
924 .auth_and_reset = pil_msa_mba_auth,
925 .deinit_image = pil_mss_deinit_image,
926 .shutdown = pil_mss_shutdown,
927};
928
929/*
930 * To be used if the modem headers are to be authenticated
931 * in init_image, and the modem segments in auth_and_reset.
932 */
933struct pil_reset_ops pil_msa_femto_mba_ops = {
934 .init_image = pil_msa_auth_modem_mdt,
935 .verify_blob = pil_msa_mba_verify_blob,
936 .auth_and_reset = pil_msa_mba_auth,
937};