/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <trace/events/trace_msm_pil_event.h>

#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "pil-msa.h"

/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB			0x010
#define QDSP6SS_DBG_CFG			0x018

/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE		0x180
#define MSS_MODEM_HALT_BASE		0x200
#define MSS_NC_HALT_BASE		0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS		0x1
#define STATUS_XPU_UNLOCKED		0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE			0x00
#define RMB_PBL_STATUS			0x04
#define RMB_MBA_COMMAND			0x08
#define RMB_MBA_STATUS			0x0C
#define RMB_PMI_META_DATA		0x10
#define RMB_PMI_CODE_START		0x14
#define RMB_PMI_CODE_LENGTH		0x18
#define RMB_PROTOCOL_VERSION		0x1C
#define RMB_MBA_DEBUG_INFORMATION	0x20

#define POLL_INTERVAL_US		50

#define CMD_META_DATA_READY		0x1
#define CMD_LOAD_READY			0x2
#define CMD_PILFAIL_NFY_MBA		0xffffdead

#define STATUS_META_DATA_AUTH_SUCCESS	0x3
#define STATUS_AUTH_COMPLETE		0x4
#define STATUS_MBA_UNLOCKED		0x6

/* External BHS */
#define EXTERNAL_BHS_ON			BIT(0)
#define EXTERNAL_BHS_STATUS		BIT(4)
#define BHS_TIMEOUT_US			50

#define MSS_RESTART_PARAM_ID		0x2
#define MSS_RESTART_ID			0xA

#define MSS_MAGIC			0xAABADEAD

#define MSS_PDC_OFFSET			8
#define MSS_PDC_MASK			BIT(MSS_PDC_OFFSET)

enum scm_cmd {
	PAS_MEM_SETUP_CMD = 2,
};

static int pbl_mba_boot_timeout_ms = 1000;
module_param(pbl_mba_boot_timeout_ms, int, 0644);

static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, 0644);

/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
static uint modem_trigger_panic;
module_param(modem_trigger_panic, uint, 0644);

/* To set the modem debug cookie in DBG_CFG register for debugging */
static uint modem_dbg_cfg;
module_param(modem_dbg_cfg, uint, 0644);

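/* Dump the PBL/MBA interface registers to the kernel log for post-mortem */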
static void modem_log_rmb_regs(void __iomem *base)
{
	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
	pr_err("RMB_MBA_COMMAND: %08x\n",
		readl_relaxed(base + RMB_MBA_COMMAND));
	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
	pr_err("RMB_PMI_META_DATA: %08x\n",
		readl_relaxed(base + RMB_PMI_META_DATA));
	pr_err("RMB_PMI_CODE_START: %08x\n",
		readl_relaxed(base + RMB_PMI_CODE_START));
	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
		readl_relaxed(base + RMB_PMI_CODE_LENGTH));
	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
		readl_relaxed(base + RMB_PROTOCOL_VERSION));
	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
		readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));

	if (modem_trigger_panic == MSS_MAGIC)
		panic("%s: System ramdump is needed!!!\n", __func__);
}

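/* Enable the modem supply rail and, if present, turn on the external CX BHS */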
static int pil_mss_power_up(struct q6v5_data *drv)
{
	int ret = 0;
	u32 regval;

	if (drv->vreg) {
		ret = regulator_enable(drv->vreg);
		if (ret)
			dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
				ret);
	}

	if (drv->cxrail_bhs) {
		regval = readl_relaxed(drv->cxrail_bhs);
		regval |= EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);

		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
	}

	return ret;
}

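/* Reverse of pil_mss_power_up(): switch off the external BHS, then the rail */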
static int pil_mss_power_down(struct q6v5_data *drv)
{
	u32 regval;

	if (drv->cxrail_bhs) {
		regval = readl_relaxed(drv->cxrail_bhs);
		regval &= ~EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);
	}

	if (drv->vreg)
		return regulator_disable(drv->vreg);

	return 0;
}

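/* Enable the bus and memory clocks needed to run the Q6 and boot the MBA */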
static int pil_mss_enable_clks(struct q6v5_data *drv)
{
	int ret;

	ret = clk_prepare_enable(drv->ahb_clk);
	if (ret)
		goto err_ahb_clk;
	ret = clk_prepare_enable(drv->axi_clk);
	if (ret)
		goto err_axi_clk;
	ret = clk_prepare_enable(drv->rom_clk);
	if (ret)
		goto err_rom_clk;
	ret = clk_prepare_enable(drv->gpll0_mss_clk);
	if (ret)
		goto err_gpll0_mss_clk;
	ret = clk_prepare_enable(drv->snoc_axi_clk);
	if (ret)
		goto err_snoc_axi_clk;
	ret = clk_prepare_enable(drv->mnoc_axi_clk);
	if (ret)
		goto err_mnoc_axi_clk;
	return 0;
/* Unwind in reverse order; each label disables only clocks already enabled */
err_mnoc_axi_clk:
	clk_disable_unprepare(drv->snoc_axi_clk);
err_snoc_axi_clk:
	clk_disable_unprepare(drv->gpll0_mss_clk);
err_gpll0_mss_clk:
	clk_disable_unprepare(drv->rom_clk);
err_rom_clk:
	clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
	clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
	return ret;
}

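/*
 * Disable the clocks enabled in pil_mss_enable_clks(). The AHB clock is
 * left running while a separate AHB clock vote is held.
 */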
static void pil_mss_disable_clks(struct q6v5_data *drv)
{
	clk_disable_unprepare(drv->mnoc_axi_clk);
	clk_disable_unprepare(drv->snoc_axi_clk);
	clk_disable_unprepare(drv->gpll0_mss_clk);
	clk_disable_unprepare(drv->rom_clk);
	clk_disable_unprepare(drv->axi_clk);
	if (!drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
}

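/* Set or clear the MSS bit in the PDC sync register, sequenced around reset */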
static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
{
	u32 val = 0;

	if (drv->pdc_sync) {
		val = readl_relaxed(drv->pdc_sync);
		if (pdc_sync)
			val |= MSS_PDC_MASK;
		else
			val &= ~MSS_PDC_MASK;
		writel_relaxed(val, drv->pdc_sync);
		/* Ensure PDC is written before next write */
		wmb();
		udelay(2);
	}
}

static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
{
	if (drv->alt_reset) {
		writel_relaxed(val, drv->alt_reset);
		/* Ensure alt reset is written before restart reg */
		wmb();
		udelay(2);
	}
}

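/*
 * Drive the MSS restart line, either by a direct register write or,
 * when the restart register is secured, through an SCM call.
 */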
static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
{
	int ret = 0;
	int scm_ret = 0;
	struct scm_desc desc = {0};

	desc.args[0] = mss_restart;
	desc.args[1] = 0;
	desc.arginfo = SCM_ARGS(2);

	if (drv->restart_reg && !drv->restart_reg_sec) {
		writel_relaxed(mss_restart, drv->restart_reg);
		mb();
		udelay(2);
	} else if (drv->restart_reg_sec) {
		if (!is_scm_armv8()) {
			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
					&mss_restart, sizeof(mss_restart),
					&scm_ret, sizeof(scm_ret));
		} else {
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
						MSS_RESTART_ID), &desc);
			scm_ret = desc.ret[0];
		}
		if (ret || scm_ret)
			pr_err("Secure MSS restart failed\n");
	}

	return ret;
}

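/* Assert every modem reset: PDC sync, alternate reset, then MSS restart */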
static int pil_mss_assert_resets(struct q6v5_data *drv)
{
	int ret = 0;

	pil_mss_pdc_sync(drv, 1);
	pil_mss_alt_reset(drv, 1);
	ret = pil_mss_restart_reg(drv, true);

	return ret;
}

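/* Release the resets in reverse order, allowing time for each to propagate */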
static int pil_mss_deassert_resets(struct q6v5_data *drv)
{
	int ret = 0;

	ret = pil_mss_restart_reg(drv, 0);
	if (ret)
		return ret;
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	pil_mss_alt_reset(drv, 0);
	pil_mss_pdc_sync(drv, false);

	return ret;
}

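/*
 * Poll the RMB status registers until first the PBL and then the MBA
 * report a successful boot, or until the boot timeout expires.
 */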
static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
{
	struct device *dev = drv->desc.dev;
	int ret;
	u32 status;
	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
		status != 0, POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
		return ret;
	}
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status != 0, POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
		return ret;
	}
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}

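/*
 * Halt the modem AXI ports, cycle the subsystem resets and, if the modem
 * had booted, drop its clock and power votes.
 */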
int pil_mss_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;

	if (drv->axi_halt_base) {
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_Q6_HALT_BASE);
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
		pil_q6v5_halt_axi_port(pil,
			drv->axi_halt_base + MSS_NC_HALT_BASE);
	}

	if (drv->axi_halt_q6)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
	if (drv->axi_halt_mss)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
	if (drv->axi_halt_nc)
		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);

	/*
	 * Software workaround to avoid high MX current during LPASS/MSS
	 * restart.
	 */
	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
		ret = clk_prepare_enable(drv->ahb_clk);
		if (!ret)
			assert_clamps(pil);
		else
			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
				ret);
	}

	pil_mss_assert_resets(drv);
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	ret = pil_mss_deassert_resets(drv);

	if (drv->is_booted) {
		pil_mss_disable_clks(drv);
		pil_mss_power_down(drv);
		drv->is_booted = false;
	}

	return ret;
}

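/*
 * Common teardown path. On the error path the MBA is first asked to
 * unlock the image region; any MBA/DP memory still held is returned to
 * Linux (if it had been assigned away) and freed.
 */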
int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	s32 status;
	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;

	if (err_path) {
		writel_relaxed(CMD_PILFAIL_NFY_MBA,
				drv->rmb_base + RMB_MBA_COMMAND);
		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_MBA_UNLOCKED || status < 0,
			POLL_INTERVAL_US, val);
		if (ret)
			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
				ret);
		else if (status < 0)
			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
				status);
	}

	ret = pil_mss_shutdown(pil);

	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	/*
	 * In case of any failure where reclaiming MBA and DP memory
	 * could not happen, free the memory here.
	 */
	if (drv->q6->mba_dp_virt) {
		if (pil->subsys_vmid > 0)
			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
						drv->q6->mba_dp_size);
		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);
		drv->q6->mba_dp_virt = NULL;
	}

	return ret;
}

int pil_mss_deinit_image(struct pil_desc *pil)
{
	return __pil_mss_deinit_image(pil, true);
}

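/*
 * Proxy-vote for the resources the modem needs while booting: the MX
 * rail at the voltage given by the "vdd_mx-uV" DT property, plus the
 * common Q6v5 proxy votes.
 */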
int pil_mss_make_proxy_votes(struct pil_desc *pil)
{
	int ret;
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	int uv = 0;

	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
	if (ret) {
		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
		return ret;
	}

	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
	if (ret) {
		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
			ret);
		return ret;
	}

	ret = regulator_enable(drv->vreg_mx);
	if (ret) {
		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
		return ret;
	}

	ret = pil_q6v5_make_proxy_votes(pil);
	if (ret) {
		regulator_disable(drv->vreg_mx);
		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
	}

	return ret;
}

void pil_mss_remove_proxy_votes(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	pil_q6v5_remove_proxy_votes(pil);
	regulator_disable(drv->vreg_mx);
	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
}

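/* Describe the modem image region to the secure world via an SCM call */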
static int pil_mss_mem_setup(struct pil_desc *pil,
					phys_addr_t addr, size_t size)
{
	struct modem_data *md = dev_get_drvdata(pil->dev);

	struct pas_init_image_req {
		u32	proc;
		u32	start_addr;
		u32	len;
	} request;
	u32 scm_ret = 0;
	int ret;
	struct scm_desc desc = {0};

	if (!md->subsys_desc.pil_mss_memsetup)
		return 0;

	request.proc = md->pas_id;
	request.start_addr = addr;
	request.len = size;

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
				sizeof(request), &scm_ret, sizeof(scm_ret));
	} else {
		desc.args[0] = md->pas_id;
		desc.args[1] = addr;
		desc.args[2] = size;
		desc.arginfo = SCM_ARGS(3);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
				&desc);
		scm_ret = desc.ret[0];
	}
	if (ret)
		return ret;
	return scm_ret;
}

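/*
 * Core boot sequence: power up the subsystem, cycle its resets, program
 * the image (and optional debug-policy) addresses into the RMB, bring
 * the Q6 out of reset and, with self-auth, wait for the MBA to come up.
 */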
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	u32 debug_val;
	int ret;

	trace_pil_func(__func__);
	if (drv->mba_dp_phys)
		start_addr = drv->mba_dp_phys;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Save state of modem debug register before full reset */
	debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);

	/* Assert reset to subsystem */
	pil_mss_assert_resets(drv);
	/* Wait 6 32kHz sleep cycles for reset */
	udelay(200);
	ret = pil_mss_deassert_resets(drv);
	if (ret)
		goto err_restart;

	writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
	if (modem_dbg_cfg)
		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);

	/* Program Image Address */
	if (drv->self_auth) {
		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
		/*
		 * Ensure write to RMB base occurs before reset
		 * is released.
		 */
		mb();
	} else {
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	/* Program DP Address */
	if (drv->dp_size) {
		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
				RMB_PMI_CODE_START);
		writel_relaxed(drv->dp_size, drv->rmb_base +
				RMB_PMI_CODE_LENGTH);
	} else {
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	}
	/* Make sure RMB regs are written before bringing modem out of reset */
	mb();

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (drv->self_auth) {
		ret = pil_msa_wait_for_mba_ready(drv);
		if (ret)
			goto err_q6v5_reset;
	}

	dev_info(pil->dev, "MBA boot done\n");
	drv->is_booted = true;

	return 0;

err_q6v5_reset:
	modem_log_rmb_regs(drv->rmb_base);
err_restart:
	pil_mss_disable_clks(drv);
	if (drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
err_clks:
	pil_mss_power_down(drv);
err_power:
	return ret;
}

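/*
 * Allocate a DMA buffer, copy the MBA firmware (plus the optional
 * "msadp" debug policy) into it, assign the region to the subsystem if
 * required, and boot the MBA through pil_mss_reset().
 */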
int pil_mss_reset_load_mba(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct modem_data *md = dev_get_drvdata(pil->dev);
	const struct firmware *fw, *dp_fw = NULL;
	char fw_name_legacy[10] = "mba.b00";
	char fw_name[10] = "mba.mbn";
	char *dp_name = "msadp";
	char *fw_name_p;
	void *mba_dp_virt;
	dma_addr_t mba_dp_phys, mba_dp_phys_end;
	int ret, count;
	const u8 *data;
	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;

	trace_pil_func(__func__);
	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
	ret = request_firmware(&fw, fw_name_p, pil->dev);
	if (ret) {
		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
			fw_name_p, ret);
		return ret;
	}

	data = fw ? fw->data : NULL;
	if (!data) {
		dev_err(pil->dev, "MBA data is NULL\n");
		ret = -ENOMEM;
		goto err_invalid_fw;
	}

	drv->mba_dp_size = SZ_1M;

	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);

	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);

	md->attrs_dma = 0;
	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;

	ret = request_firmware(&dp_fw, dp_name, pil->dev);
	if (ret) {
		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
			dp_name);
	} else {
		if (!dp_fw || !dp_fw->data) {
			dev_err(pil->dev, "Invalid DP firmware\n");
			ret = -ENOMEM;
			goto err_invalid_fw;
		}
		drv->dp_size = dp_fw->size;
		drv->mba_dp_size += drv->dp_size;
		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
	}

	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
			GFP_KERNEL, md->attrs_dma);
	if (!mba_dp_virt) {
		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
			__func__, drv->mba_dp_size);
		ret = -ENOMEM;
		goto err_invalid_fw;
	}

	/* Make sure there are no mappings in PKMAP and fixmap */
	kmap_flush_unused();
	kmap_atomic_flush_unused();

	drv->mba_dp_phys = mba_dp_phys;
	drv->mba_dp_virt = mba_dp_virt;
	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;

	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
		&mba_dp_phys, &mba_dp_phys_end);

	/* Load the MBA image into memory */
	count = fw->size;
	if (count <= SZ_1M) {
		/* Ensures memcpy is done for max 1MB fw size */
		memcpy(mba_dp_virt, data, count);
	} else {
		dev_err(pil->dev, "%s fw image loading into memory failed due to fw size overflow\n",
			__func__);
		ret = -EINVAL;
		goto err_mba_data;
	}
	/* Ensure memcpy of the MBA memory is done before loading the DP */
	wmb();

	/* Load the DP image into memory */
	if (drv->mba_dp_size > SZ_1M) {
		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
		/* Ensure memcpy is done before powering up modem */
		wmb();
	}

	if (pil->subsys_vmid > 0) {
		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
				drv->mba_dp_size);
		if (ret) {
			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
				ret);
			goto err_mba_data;
		}
	}

	ret = pil_mss_reset(pil);
	if (ret) {
		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
		goto err_mss_reset;
	}

	if (dp_fw)
		release_firmware(dp_fw);
	release_firmware(fw);

	return 0;

err_mss_reset:
	if (pil->subsys_vmid > 0)
		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
				drv->mba_dp_size);
err_mba_data:
	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
			drv->mba_dp_phys, md->attrs_dma);
err_invalid_fw:
	if (dp_fw)
		release_firmware(dp_fw);
	release_firmware(fw);
	drv->mba_dp_virt = NULL;
	return ret;
}

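/*
 * Stage the modem metadata in a physically contiguous buffer and have
 * the MBA authenticate the headers before any segment is loaded.
 */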
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	unsigned long attrs = 0;

	trace_pil_func(__func__);
	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	attrs |= DMA_ATTR_SKIP_ZEROING;
	attrs |= DMA_ATTR_STRONGLY_ORDERED;
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
					GFP_KERNEL, attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	if (pil->subsys_vmid > 0) {
		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
				ALIGN(size, SZ_4K));
		if (ret) {
			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
				ret);
			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
					attrs);
			goto fail;
		}
	}

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
			ret);
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
			status);
		ret = -EINVAL;
	}

	if (pil->subsys_vmid > 0)
		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));

	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		if (pil->subsys_vmid > 0)
			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
					drv->q6->mba_dp_size);
		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);
		drv->q6->mba_dp_virt = NULL;
	}
	return ret;
}

static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
					const u8 *metadata, size_t size)
{
	int ret;

	ret = pil_mss_reset_load_mba(pil);
	if (ret)
		return ret;

	return pil_msa_auth_modem_mdt(pil, metadata, size);
}

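/*
 * Per-segment hook: the first call points the MBA at the image start;
 * every call then advances the length counter so the MBA authenticates
 * the data streamed so far.
 */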
static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	s32 status;
	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Begin image authentication */
	if (img_length == 0) {
		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
	}
	/* Increment length counter */
	img_length += size;
	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
	if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d\n", status);
		modem_log_rmb_regs(drv->rmb_base);
		return -EINVAL;
	}

	return 0;
}

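/*
 * Wait for the MBA to report authentication of the whole image, then
 * reclaim the MBA/DP buffer and drop the AHB clock vote.
 */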
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret;
	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
	s32 status;
	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_AUTH_COMPLETE || status < 0,
		POLL_INTERVAL_US, val);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
			ret);
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6) {
		if (drv->q6->mba_dp_virt) {
			/* Reclaim MBA and DP (if allocated) memory. */
			if (pil->subsys_vmid > 0)
				pil_assign_mem_to_linux(pil,
					drv->q6->mba_dp_phys,
					drv->q6->mba_dp_size);
			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
				drv->attrs_dma);
			drv->q6->mba_dp_virt = NULL;
		}
	}
	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	return ret;
}

/*
 * To be used only if self-auth is disabled, or if the
 * MBA image is loaded as segments and not in init_image.
 */
struct pil_reset_ops pil_msa_mss_ops = {
	.proxy_vote = pil_mss_make_proxy_votes,
	.proxy_unvote = pil_mss_remove_proxy_votes,
	.auth_and_reset = pil_mss_reset,
	.shutdown = pil_mss_shutdown,
};

/*
 * To be used if self-auth is enabled and the MBA is to be loaded
 * in init_image and the modem headers are also to be authenticated
 * in init_image. Modem segments authenticated in auth_and_reset.
 */
struct pil_reset_ops pil_msa_mss_ops_selfauth = {
	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
	.proxy_vote = pil_mss_make_proxy_votes,
	.proxy_unvote = pil_mss_remove_proxy_votes,
	.mem_setup = pil_mss_mem_setup,
	.verify_blob = pil_msa_mba_verify_blob,
	.auth_and_reset = pil_msa_mba_auth,
	.deinit_image = pil_mss_deinit_image,
	.shutdown = pil_mss_shutdown,
};

/*
 * To be used if the modem headers are to be authenticated
 * in init_image, and the modem segments in auth_and_reset.
 */
struct pil_reset_ops pil_msa_femto_mba_ops = {
	.init_image = pil_msa_auth_modem_mdt,
	.verify_blob = pil_msa_mba_verify_blob,
	.auth_and_reset = pil_msa_mba_auth,
};