blob: 0727d31d68708f816a8da22c34ab24ac0e2bbe70 [file] [log] [blame]
Matt Wagantall4e2599e2012-03-21 22:31:35 -07001/*
Matt Wagantalla72d03d2013-02-26 21:13:14 -08002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Matt Wagantall4e2599e2012-03-21 22:31:35 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/io.h>
18#include <linux/iopoll.h>
19#include <linux/ioport.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070020#include <linux/delay.h>
21#include <linux/sched.h>
22#include <linux/clk.h>
23#include <linux/err.h>
24#include <linux/of.h>
25#include <linux/regulator/consumer.h>
Stephen Boyd3da4fd02012-07-06 10:00:12 -070026#include <linux/interrupt.h>
Seemanta Dutta6e58f542013-03-04 19:28:16 -080027#include <linux/of_gpio.h>
Matt Wagantall724b2bb2013-03-18 14:54:06 -070028#include <linux/dma-mapping.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070029
Stephen Boyd3da4fd02012-07-06 10:00:12 -070030#include <mach/subsystem_restart.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070031#include <mach/clk.h>
Stephen Boyd3da4fd02012-07-06 10:00:12 -070032#include <mach/msm_smsm.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070033#include <mach/ramdump.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070034
35#include "peripheral-loader.h"
36#include "pil-q6v5.h"
Vikram Mulukutla896d0582012-10-17 16:57:46 -070037#include "sysmon.h"
Matt Wagantall4e2599e2012-03-21 22:31:35 -070038
/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB			0x010	/* reset-vector (entry address) register */

/* AXI Halting Registers (offsets from axi_halt_base) */
#define MSS_Q6_HALT_BASE		0x180
#define MSS_MODEM_HALT_BASE		0x200
#define MSS_NC_HALT_BASE		0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS		0x1
#define STATUS_XPU_UNLOCKED		0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2

/* PBL/MBA interface registers (offsets from rmb_base) */
#define RMB_MBA_IMAGE			0x00
#define RMB_PBL_STATUS			0x04
#define RMB_MBA_COMMAND			0x08
#define RMB_MBA_STATUS			0x0C
#define RMB_PMI_META_DATA		0x10
#define RMB_PMI_CODE_START		0x14
#define RMB_PMI_CODE_LENGTH		0x18

/* Regulator voltages in microvolts for the MSS and MX rails */
#define VDD_MSS_UV			1050000
#define MAX_VDD_MSS_UV			1150000
#define MAX_VDD_MX_UV			1150000

#define PROXY_TIMEOUT_MS		10000
#define POLL_INTERVAL_US		50

/* Commands written to RMB_MBA_COMMAND */
#define CMD_META_DATA_READY		0x1
#define CMD_LOAD_READY			0x2

/* Success values read back from RMB_MBA_STATUS (negative values = errors) */
#define STATUS_META_DATA_AUTH_SUCCESS	0x3
#define STATUS_AUTH_COMPLETE		0x4

#define MAX_SSR_REASON_LEN 81U

/* External BHS (Block Head Switch) control bits for the CX rail */
#define EXTERNAL_BHS_ON			BIT(0)
#define EXTERNAL_BHS_STATUS		BIT(4)
#define BHS_TIMEOUT_US			50
80
/* Per-device state for the modem's MBA (Modem Boot Authenticator) image. */
struct mba_data {
	void __iomem *rmb_base;		/* PBL/MBA mailbox (RMB) registers */
	void __iomem *io_clamp_reg;
	struct pil_desc desc;		/* PIL descriptor for the modem image */
	struct subsys_device *subsys;
	struct subsys_desc subsys_desc;
	void *adsp_state_notifier;	/* handle from subsys_notif_register_notifier("adsp") */
	u32 img_length;			/* running byte count of image segments fed to the MBA */
	struct q6v5_data *q6;		/* underlying Q6v5 processor state */
	bool self_auth;			/* from "qcom,pil-self-auth" DT property */
	void *ramdump_dev;		/* ramdump device for modem fw memory */
	void *smem_ramdump_dev;		/* ramdump device for the SMEM region */
	bool crash_shutdown;		/* true while we force-stop the modem ourselves */
	bool ignore_errors;		/* suppress error IRQs during an in-flight restart */
	int is_loadable;		/* from "qcom,is-loadable" DT property */
	int err_fatal_irq;		/* IRQ derived from the err-fatal GPIO */
	int force_stop_gpio;		/* outbound GPIO used to force-stop the modem */
};
99
/* Timeout (ms) for the PBL and MBA boot-progress polls; runtime tunable. */
static int pbl_mba_boot_timeout_ms = 100;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);

/* Timeout (ms) for MBA authentication of metadata and image segments. */
static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);
105
Stephen Boyd3826cd42012-07-05 17:37:53 -0700106static int pil_mss_power_up(struct q6v5_data *drv)
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700107{
108 int ret;
Stephen Boyd3826cd42012-07-05 17:37:53 -0700109 struct device *dev = drv->desc.dev;
Patrick Daly11ca6af2013-03-03 17:07:28 -0800110 u32 regval;
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700111
112 ret = regulator_enable(drv->vreg);
113 if (ret)
Matt Wagantall70315fb2012-12-03 16:33:28 -0800114 dev_err(dev, "Failed to enable modem regulator.\n");
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700115
Patrick Daly11ca6af2013-03-03 17:07:28 -0800116 if (drv->cxrail_bhs) {
117 regval = readl_relaxed(drv->cxrail_bhs);
118 regval |= EXTERNAL_BHS_ON;
119 writel_relaxed(regval, drv->cxrail_bhs);
120
121 ret = readl_poll_timeout(drv->cxrail_bhs, regval,
122 regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
123 }
124
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700125 return ret;
126}
127
Stephen Boyd3826cd42012-07-05 17:37:53 -0700128static int pil_mss_power_down(struct q6v5_data *drv)
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700129{
Patrick Daly11ca6af2013-03-03 17:07:28 -0800130 u32 regval;
131
132 if (drv->cxrail_bhs) {
133 regval = readl_relaxed(drv->cxrail_bhs);
134 regval &= ~EXTERNAL_BHS_ON;
135 writel_relaxed(regval, drv->cxrail_bhs);
136 }
137
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700138 return regulator_disable(drv->vreg);
139}
140
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700141static int pil_mss_enable_clks(struct q6v5_data *drv)
142{
143 int ret;
144
145 ret = clk_prepare_enable(drv->ahb_clk);
146 if (ret)
147 goto err_ahb_clk;
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700148 ret = clk_prepare_enable(drv->axi_clk);
149 if (ret)
150 goto err_axi_clk;
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700151 ret = clk_prepare_enable(drv->rom_clk);
152 if (ret)
153 goto err_rom_clk;
154
155 return 0;
156
157err_rom_clk:
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700158 clk_disable_unprepare(drv->axi_clk);
159err_axi_clk:
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700160 clk_disable_unprepare(drv->ahb_clk);
161err_ahb_clk:
162 return ret;
163}
164
/* Release the clocks taken in pil_mss_enable_clks(), in reverse order. */
static void pil_mss_disable_clks(struct q6v5_data *drv)
{
	clk_disable_unprepare(drv->rom_clk);
	clk_disable_unprepare(drv->axi_clk);
	clk_disable_unprepare(drv->ahb_clk);
}
171
/*
 * Poll the RMB status registers until first the PBL and then the MBA
 * report completion, or pbl_mba_boot_timeout_ms expires for either
 * stage.  Returns 0 on success, a negative errno on timeout or on an
 * unexpected status value.
 */
static int wait_for_mba_ready(struct q6v5_data *drv)
{
	struct device *dev = drv->desc.dev;
	struct mba_data *mba = platform_get_drvdata(to_platform_device(dev));
	int ret;
	u32 status;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(mba->rmb_base + RMB_PBL_STATUS, status,
		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "PBL boot timed out\n");
		return ret;
	}
	/* Any non-success PBL status is fatal. */
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(mba->rmb_base + RMB_MBA_STATUS, status,
		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "MBA boot timed out\n");
		return ret;
	}
	/* Either XPU-unlocked variant counts as a successful MBA boot. */
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}
206
/*
 * Shut the modem subsystem down: halt its AXI ports, put the Q6 back
 * in reset, assert the subsystem reset and remove power.  Safe to call
 * even when pil_mss_reset() never ran (see the comment below).
 */
static int pil_mss_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	/* Quiesce all three AXI masters before touching the core. */
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);

	/*
	 * If the shutdown function is called before the reset function, clocks
	 * and power will not be enabled yet. Enable them here so that register
	 * writes performed during the shutdown succeed.
	 */
	if (drv->is_booted == false) {
		pil_mss_power_up(drv);
		pil_mss_enable_clks(drv);
	}
	pil_q6v5_shutdown(pil);

	pil_mss_disable_clks(drv);

	/* Assert subsystem reset; pil_mss_reset() deasserts it again. */
	writel_relaxed(1, drv->restart_reg);

	/*
	 * access to the cx_rail_bhs is restricted until after the gcc_mss
	 * reset is asserted once the PBL starts executing.
	 */
	pil_mss_power_down(drv);

	drv->is_booted = false;

	return 0;
}
240
/*
 * Bring the modem subsystem out of reset and boot it: power, reset
 * deassert, clocks, entry-address programming and Q6 release.  With
 * self-auth, additionally wait for the PBL/MBA to report readiness.
 * On any failure the already-acquired resources are unwound in reverse.
 */
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct platform_device *pdev = to_platform_device(pil->dev);
	struct mba_data *mba = platform_get_drvdata(pdev);
	unsigned long start_addr = pil_get_entry_addr(pil);
	int ret;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	/* Deassert reset to subsystem and wait for propagation */
	writel_relaxed(0, drv->restart_reg);
	mb();
	udelay(2);

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Program Image Address */
	if (mba->self_auth) {
		/* Self-auth path: hand the full entry address to the RMB. */
		writel_relaxed(start_addr, mba->rmb_base + RMB_MBA_IMAGE);
		/* Ensure write to RMB base occurs before reset is released. */
		mb();
	} else {
		/* Legacy path: program the Q6 reset vector directly. */
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (mba->self_auth) {
		ret = wait_for_mba_ready(drv);
		if (ret)
			goto err_auth;
	}

	drv->is_booted = true;

	return 0;

err_auth:
	pil_q6v5_shutdown(pil);
err_q6v5_reset:
	pil_mss_disable_clks(drv);
err_clks:
	pil_mss_power_down(drv);
err_power:
	return ret;
}
300
Matt Wagantall70315fb2012-12-03 16:33:28 -0800301static int pil_q6v5_mss_make_proxy_votes(struct pil_desc *pil)
302{
303 int ret;
304 struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
305
306 ret = regulator_set_voltage(drv->vreg_mx, VDD_MSS_UV, MAX_VDD_MX_UV);
307 if (ret) {
308 dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
309 return ret;
310 }
311
312 ret = regulator_enable(drv->vreg_mx);
313 if (ret) {
314 dev_err(pil->dev, "Failed to enable vreg_mx\n");
315 regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
316 return ret;
317 }
318
319 ret = pil_q6v5_make_proxy_votes(pil);
320 if (ret) {
321 regulator_disable(drv->vreg_mx);
322 regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
323 }
324
325 return ret;
326}
327
/* Drop the votes taken in pil_q6v5_mss_make_proxy_votes(), in reverse. */
static void pil_q6v5_mss_remove_proxy_votes(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	pil_q6v5_remove_proxy_votes(pil);
	regulator_disable(drv->vreg_mx);
	regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
}
335
/* PIL operations for booting the bare Q6v5 processor image. */
static struct pil_reset_ops pil_mss_ops = {
	.proxy_vote = pil_q6v5_mss_make_proxy_votes,
	.proxy_unvote = pil_q6v5_mss_remove_proxy_votes,
	.auth_and_reset = pil_mss_reset,
	.shutdown = pil_mss_shutdown,
};
342
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700343static int pil_mba_make_proxy_votes(struct pil_desc *pil)
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700344{
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700345 int ret;
346 struct mba_data *drv = dev_get_drvdata(pil->dev);
347
348 ret = clk_prepare_enable(drv->q6->xo);
349 if (ret) {
350 dev_err(pil->dev, "Failed to enable XO\n");
351 return ret;
352 }
353 return 0;
354}
355
/* Drop the XO clock vote taken in pil_mba_make_proxy_votes(). */
static void pil_mba_remove_proxy_votes(struct pil_desc *pil)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	clk_disable_unprepare(drv->q6->xo);
}
361
/*
 * Hand the image's metadata (headers) to the MBA for authentication.
 * The metadata is copied into a DMA-coherent buffer, its bus address
 * written to the RMB, and RMB_MBA_STATUS polled for the verdict.
 * Returns 0 on success, -ENOMEM, -EINVAL (MBA error) or a poll error.
 */
static int pil_mba_init_image(struct pil_desc *pil,
			      const u8 *metadata, size_t size)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;

	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_coherent(pil->dev, size, &mdata_phys,
					GFP_KERNEL);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		return -ENOMEM;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	drv->img_length = 0;

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	/* Negative status values are MBA-reported errors. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_coherent(pil->dev, size, mdata_virt, mdata_phys);

	return ret;
}
404
405static int pil_mba_verify_blob(struct pil_desc *pil, u32 phy_addr,
406 size_t size)
407{
408 struct mba_data *drv = dev_get_drvdata(pil->dev);
409 s32 status;
410
411 /* Begin image authentication */
412 if (drv->img_length == 0) {
413 writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
414 writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
415 }
416 /* Increment length counter */
417 drv->img_length += size;
418 writel_relaxed(drv->img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
419
420 status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
421 if (status < 0) {
422 dev_err(pil->dev, "MBA returned error %d\n", status);
423 return -EINVAL;
424 }
425
426 return 0;
427}
428
429static int pil_mba_auth(struct pil_desc *pil)
430{
431 struct mba_data *drv = dev_get_drvdata(pil->dev);
432 int ret;
433 s32 status;
434
435 /* Wait for all segments to be authenticated or an error to occur */
436 ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
437 status == STATUS_AUTH_COMPLETE || status < 0,
438 50, modem_auth_timeout_ms * 1000);
439 if (ret) {
440 dev_err(pil->dev, "MBA authentication of image timed out\n");
441 } else if (status < 0) {
442 dev_err(pil->dev, "MBA returned error %d for image\n", status);
443 ret = -EINVAL;
444 }
445
446 return ret;
447}
448
/* PIL operations for the MBA-authenticated modem firmware image. */
static struct pil_reset_ops pil_mba_ops = {
	.init_image = pil_mba_init_image,
	.proxy_vote = pil_mba_make_proxy_votes,
	.proxy_unvote = pil_mba_remove_proxy_votes,
	.verify_blob = pil_mba_verify_blob,
	.auth_and_reset = pil_mba_auth,
};
456
/* Map an embedded subsys_desc back to its containing mba_data. */
#define subsys_to_drv(d) container_of(d, struct mba_data, subsys_desc)
458
459static void log_modem_sfr(void)
460{
461 u32 size;
462 char *smem_reason, reason[MAX_SSR_REASON_LEN];
463
464 smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
465 if (!smem_reason || !size) {
466 pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
467 return;
468 }
469 if (!smem_reason[0]) {
470 pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
471 return;
472 }
473
474 strlcpy(reason, smem_reason, min(size, sizeof(reason)));
475 pr_err("modem subsystem failure reason: %s.\n", reason);
476
477 smem_reason[0] = '\0';
478 wmb();
479}
480
/*
 * Log the failure reason and trigger a subsystem restart.  Setting
 * ignore_errors first suppresses further error IRQs until the restart's
 * powerup path clears it.
 */
static void restart_modem(struct mba_data *drv)
{
	log_modem_sfr();
	drv->ignore_errors = true;
	subsystem_restart_dev(drv->subsys);
}
487
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800488static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700489{
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800490 struct mba_data *drv = dev_id;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700491
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800492 /* Ignore if we're the one that set the force stop GPIO */
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700493 if (drv->crash_shutdown)
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800494 return IRQ_HANDLED;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700495
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800496 pr_err("Fatal error on the modem.\n");
497 restart_modem(drv);
498 return IRQ_HANDLED;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700499}
500
501static int modem_shutdown(const struct subsys_desc *subsys)
502{
Stephen Boyde83a0a22012-06-29 13:51:27 -0700503 struct mba_data *drv = subsys_to_drv(subsys);
504
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800505 if (!drv->is_loadable)
Vikram Mulukutla1d958af2012-11-20 14:06:12 -0800506 return 0;
Matt Wagantalla72d03d2013-02-26 21:13:14 -0800507 pil_shutdown(&drv->desc);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700508 pil_shutdown(&drv->q6->desc);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700509 return 0;
510}
511
512static int modem_powerup(const struct subsys_desc *subsys)
513{
514 struct mba_data *drv = subsys_to_drv(subsys);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700515 int ret;
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800516
517 if (!drv->is_loadable)
Vikram Mulukutla1d958af2012-11-20 14:06:12 -0800518 return 0;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700519 /*
520 * At this time, the modem is shutdown. Therefore this function cannot
521 * run concurrently with either the watchdog bite error handler or the
522 * SMSM callback, making it safe to unset the flag below.
523 */
524 drv->ignore_errors = false;
Stephen Boyde83a0a22012-06-29 13:51:27 -0700525 ret = pil_boot(&drv->q6->desc);
526 if (ret)
527 return ret;
528 ret = pil_boot(&drv->desc);
529 if (ret)
530 pil_shutdown(&drv->q6->desc);
531 return ret;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700532}
533
/*
 * Crash-shutdown hook: force-stop the modem via GPIO.  crash_shutdown
 * is set first so the resulting err-fatal interrupt is ignored.
 */
static void modem_crash_shutdown(const struct subsys_desc *subsys)
{
	struct mba_data *drv = subsys_to_drv(subsys);
	drv->crash_shutdown = true;
	gpio_set_value(drv->force_stop_gpio, 1);
}
540
/* Fixed SMEM region (0x0FA00000..0x0FC00000) captured in ramdumps. */
static struct ramdump_segment smem_segments[] = {
	{0x0FA00000, 0x0FC00000 - 0x0FA00000},
};
544
545static int modem_ramdump(int enable, const struct subsys_desc *subsys)
546{
547 struct mba_data *drv = subsys_to_drv(subsys);
548 int ret;
549
550 if (!enable)
551 return 0;
552
Stephen Boyde83a0a22012-06-29 13:51:27 -0700553 ret = pil_boot(&drv->q6->desc);
554 if (ret)
555 return ret;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700556
Stephen Boyd05c45f22013-01-24 12:02:28 -0800557 ret = pil_do_ramdump(&drv->desc, drv->ramdump_dev);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700558 if (ret < 0) {
559 pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
560 goto out;
561 }
562
Stephen Boyd5eb17ce2012-11-29 15:34:21 -0800563 ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700564 ARRAY_SIZE(smem_segments));
565 if (ret < 0) {
566 pr_err("Unable to dump smem memory (rc = %d).\n", ret);
567 goto out;
568 }
569
570out:
Stephen Boyde83a0a22012-06-29 13:51:27 -0700571 pil_shutdown(&drv->q6->desc);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700572 return ret;
573}
574
Vikram Mulukutla896d0582012-10-17 16:57:46 -0700575static int adsp_state_notifier_fn(struct notifier_block *this,
576 unsigned long code, void *ss_handle)
577{
578 int ret;
579 ret = sysmon_send_event(SYSMON_SS_MODEM, "adsp", code);
580 if (ret < 0)
581 pr_err("%s: sysmon_send_event failed (%d).", __func__, ret);
582 return NOTIFY_DONE;
583}
584
/* Notifier relaying ADSP SSR state changes to the modem (via sysmon). */
static struct notifier_block adsp_state_notifier_block = {
	.notifier_call = adsp_state_notifier_fn,
};
588
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700589static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
590{
591 struct mba_data *drv = dev_id;
592 if (drv->ignore_errors)
593 return IRQ_HANDLED;
594 pr_err("Watchdog bite received from modem software!\n");
595 restart_modem(drv);
596 return IRQ_HANDLED;
597}
598
599static int mss_start(const struct subsys_desc *desc)
600{
Stephen Boyde83a0a22012-06-29 13:51:27 -0700601 int ret;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700602 struct mba_data *drv = subsys_to_drv(desc);
603
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800604 if (!drv->is_loadable)
Vikram Mulukutla1d958af2012-11-20 14:06:12 -0800605 return 0;
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800606
Stephen Boyde83a0a22012-06-29 13:51:27 -0700607 ret = pil_boot(&drv->q6->desc);
608 if (ret)
609 return ret;
610 ret = pil_boot(&drv->desc);
611 if (ret)
612 pil_shutdown(&drv->q6->desc);
613 return ret;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700614}
615
616static void mss_stop(const struct subsys_desc *desc)
617{
618 struct mba_data *drv = subsys_to_drv(desc);
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800619
620 if (!drv->is_loadable)
621 return;
622
Matt Wagantalla72d03d2013-02-26 21:13:14 -0800623 pil_shutdown(&drv->desc);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700624 pil_shutdown(&drv->q6->desc);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700625}
626
/*
 * Register the modem with the subsystem-restart framework: hook up the
 * restart callbacks, create ramdump devices, request the watchdog and
 * err-fatal IRQs and subscribe to ADSP state notifications.  Everything
 * is unwound in reverse order on failure.
 */
static int __devinit pil_subsys_init(struct mba_data *drv,
					struct platform_device *pdev)
{
	int irq, ret;

	/* IRQ 0 of the platform device is the modem watchdog bite. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	drv->subsys_desc.name = "modem";
	drv->subsys_desc.dev = &pdev->dev;
	drv->subsys_desc.owner = THIS_MODULE;
	drv->subsys_desc.shutdown = modem_shutdown;
	drv->subsys_desc.powerup = modem_powerup;
	drv->subsys_desc.ramdump = modem_ramdump;
	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
	drv->subsys_desc.start = mss_start;
	drv->subsys_desc.stop = mss_stop;

	drv->subsys = subsys_register(&drv->subsys_desc);
	if (IS_ERR(drv->subsys)) {
		ret = PTR_ERR(drv->subsys);
		goto err_subsys;
	}

	/* Ramdump device for the modem firmware memory. */
	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
	if (!drv->ramdump_dev) {
		pr_err("%s: Unable to create a modem ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto err_ramdump;
	}

	/* Separate ramdump device for the shared-memory (SMEM) region. */
	drv->smem_ramdump_dev = create_ramdump_device("smem-modem", &pdev->dev);
	if (!drv->smem_ramdump_dev) {
		pr_err("%s: Unable to create an smem ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto err_ramdump_smem;
	}

	ret = devm_request_irq(&pdev->dev, irq, modem_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "modem_wdog", drv);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to request watchdog IRQ.\n");
		goto err_irq;
	}

	/* err_fatal_irq was derived from the err-fatal GPIO in probe. */
	ret = devm_request_irq(&pdev->dev, drv->err_fatal_irq,
			modem_err_fatal_intr_handler,
			IRQF_TRIGGER_RISING, "pil-mss", drv);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to register SMP2P err fatal handler!\n");
		goto err_irq;
	}

	drv->adsp_state_notifier = subsys_notif_register_notifier("adsp",
						&adsp_state_notifier_block);
	if (IS_ERR(drv->adsp_state_notifier)) {
		ret = PTR_ERR(drv->adsp_state_notifier);
		dev_err(&pdev->dev, "%s: Registration with the SSR notification driver failed (%d)",
			__func__, ret);
		goto err_irq;
	}

	return 0;

err_irq:
	destroy_ramdump_device(drv->smem_ramdump_dev);
err_ramdump_smem:
	destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
	subsys_unregister(drv->subsys);
err_subsys:
	return ret;
}
703
/*
 * Prepare everything needed to actually load and boot modem firmware:
 * the Q6v5 PIL descriptor, register mappings, regulators, clocks and
 * the MBA PIL descriptor.  Called from probe only when the
 * "qcom,is-loadable" DT property is set.  Most resources are devm-
 * managed, so early returns need no cleanup.
 */
static int __devinit pil_mss_loadable_init(struct mba_data *drv,
					struct platform_device *pdev)
{
	struct q6v5_data *q6;
	struct pil_desc *q6_desc, *mba_desc;
	struct resource *res;
	int ret;

	q6 = pil_q6v5_init(pdev);
	if (IS_ERR(q6))
		return PTR_ERR(q6);
	drv->q6 = q6;

	q6_desc = &q6->desc;
	q6_desc->ops = &pil_mss_ops;
	q6_desc->owner = THIS_MODULE;
	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;

	drv->self_auth = of_property_read_bool(pdev->dev.of_node,
					       "qcom,pil-self-auth");
	if (drv->self_auth) {
		/* RMB registers are only needed for MBA self-auth. */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "rmb_base");
		drv->rmb_base = devm_request_and_ioremap(&pdev->dev, res);
		if (!drv->rmb_base)
			return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
	q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
	if (!q6->restart_reg)
		return -ENOMEM;

	q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
	if (IS_ERR(q6->vreg))
		return PTR_ERR(q6->vreg);

	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
	if (IS_ERR(q6->vreg_mx))
		return PTR_ERR(q6->vreg_mx);

	/* Voltage failure is logged but not fatal; mode failure is. */
	ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV, MAX_VDD_MSS_UV);
	if (ret)
		dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");

	/* 100000 uA (100 mA) load vote on the MSS rail. */
	ret = regulator_set_optimum_mode(q6->vreg, 100000);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
		return ret;
	}

	/* Optional external BHS control for the CX rail. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "cxrail_bhs_reg");
	if (res)
		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
					      resource_size(res));


	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(q6->ahb_clk))
		return PTR_ERR(q6->ahb_clk);

	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(q6->axi_clk))
		return PTR_ERR(q6->axi_clk);

	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(q6->rom_clk))
		return PTR_ERR(q6->rom_clk);

	ret = pil_desc_init(q6_desc);
	if (ret)
		return ret;

	mba_desc = &drv->desc;
	mba_desc->name = "modem";
	mba_desc->dev = &pdev->dev;
	mba_desc->ops = &pil_mba_ops;
	mba_desc->owner = THIS_MODULE;
	mba_desc->proxy_timeout = PROXY_TIMEOUT_MS;

	ret = pil_desc_init(mba_desc);
	if (ret)
		goto err_mba_desc;

	return 0;

err_mba_desc:
	pil_desc_release(q6_desc);
	return ret;

}
796
/*
 * Probe: allocate driver state, optionally set up the loadable-image
 * machinery, resolve the SMP2P GPIOs and register with the
 * subsystem-restart framework.
 */
static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
{
	struct mba_data *drv;
	int ret, err_fatal_gpio;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	drv->is_loadable = of_property_read_bool(pdev->dev.of_node,
						 "qcom,is-loadable");
	if (drv->is_loadable) {
		ret = pil_mss_loadable_init(drv, pdev);
		if (ret)
			return ret;
	}

	/* Get the IRQ from the GPIO for registering inbound handler */
	err_fatal_gpio = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-err-fatal", 0);
	if (err_fatal_gpio < 0)
		return err_fatal_gpio;

	drv->err_fatal_irq = gpio_to_irq(err_fatal_gpio);
	if (drv->err_fatal_irq < 0)
		return drv->err_fatal_irq;

	/* Get the GPIO pin for writing the outbound bits: add more as needed */
	drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-force-stop", 0);
	if (drv->force_stop_gpio < 0)
		return drv->force_stop_gpio;

	return pil_subsys_init(drv, pdev);
}
833
/*
 * Remove: unwind probe in reverse order.
 * NOTE(review): the two pil_desc_release() calls run even when
 * "qcom,is-loadable" was unset and the descriptors were never
 * initialized — confirm pil_desc_release() tolerates that.
 */
static int __devexit pil_mss_driver_exit(struct platform_device *pdev)
{
	struct mba_data *drv = platform_get_drvdata(pdev);

	subsys_notif_unregister_notifier(drv->adsp_state_notifier,
					&adsp_state_notifier_block);
	subsys_unregister(drv->subsys);
	destroy_ramdump_device(drv->smem_ramdump_dev);
	destroy_ramdump_device(drv->ramdump_dev);
	pil_desc_release(&drv->desc);
	pil_desc_release(&drv->q6->desc);
	return 0;
}
847
/* Device-tree match table: binds to "qcom,pil-q6v5-mss" nodes. */
static struct of_device_id mss_match_table[] = {
	{ .compatible = "qcom,pil-q6v5-mss" },
	{}
};
852
/* Platform driver glue for the Q6v5 MSS peripheral loader. */
static struct platform_driver pil_mss_driver = {
	.probe = pil_mss_driver_probe,
	.remove = __devexit_p(pil_mss_driver_exit),
	.driver = {
		.name = "pil-q6v5-mss",
		.of_match_table = mss_match_table,
		.owner = THIS_MODULE,
	},
};
862
/* Module init: register the platform driver. */
static int __init pil_mss_init(void)
{
	return platform_driver_register(&pil_mss_driver);
}
module_init(pil_mss_init);
868
/* Module exit: unregister the platform driver. */
static void __exit pil_mss_exit(void)
{
	platform_driver_unregister(&pil_mss_driver);
}
module_exit(pil_mss_exit);
874
875MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
876MODULE_LICENSE("GPL v2");