/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>

#include <mach/subsystem_restart.h>
#include <mach/clk.h>
#include <mach/msm_smsm.h>
#include <mach/ramdump.h>

#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "sysmon.h"

/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB			0x010

/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE		0x180
#define MSS_MODEM_HALT_BASE		0x200
#define MSS_NC_HALT_BASE		0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS		0x1
#define STATUS_XPU_UNLOCKED		0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE			0x00
#define RMB_PBL_STATUS			0x04
#define RMB_MBA_COMMAND			0x08
#define RMB_MBA_STATUS			0x0C
#define RMB_PMI_META_DATA		0x10
#define RMB_PMI_CODE_START		0x14
#define RMB_PMI_CODE_LENGTH		0x18

#define VDD_MSS_UV			1050000
#define MAX_VDD_MSS_UV			1150000
#define MAX_VDD_MX_UV			1150000

#define PROXY_TIMEOUT_MS		10000
#define POLL_INTERVAL_US		50

#define CMD_META_DATA_READY		0x1
#define CMD_LOAD_READY			0x2

#define STATUS_META_DATA_AUTH_SUCCESS	0x3
#define STATUS_AUTH_COMPLETE		0x4

#define MAX_SSR_REASON_LEN		81U

/* External BHS */
#define EXTERNAL_BHS_ON			BIT(0)
#define EXTERNAL_BHS_STATUS		BIT(4)
#define BHS_TIMEOUT_US			50

struct mba_data {
	void __iomem *rmb_base;
	void __iomem *io_clamp_reg;
	struct pil_desc desc;
	struct subsys_device *subsys;
	struct subsys_desc subsys_desc;
	void *adsp_state_notifier;
	u32 img_length;
	struct q6v5_data *q6;
	bool self_auth;
	void *ramdump_dev;
	void *smem_ramdump_dev;
	bool crash_shutdown;
	bool ignore_errors;
	int err_fatal_irq;
	int force_stop_gpio;
};

static int pbl_mba_boot_timeout_ms = 100;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);

static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);

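/*
 * Enable the modem supply regulator and, when a CX-rail BHS register is
 * mapped, switch the external BHS on and poll until it reports ready.
 */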
static int pil_mss_power_up(struct q6v5_data *drv)
{
	int ret;
	struct device *dev = drv->desc.dev;
	u32 regval;

	ret = regulator_enable(drv->vreg);
	if (ret)
		dev_err(dev, "Failed to enable modem regulator.\n");

	if (drv->cxrail_bhs) {
		regval = readl_relaxed(drv->cxrail_bhs);
		regval |= EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);

		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
	}

	return ret;
}

static int pil_mss_power_down(struct q6v5_data *drv)
{
	u32 regval;

	if (drv->cxrail_bhs) {
		regval = readl_relaxed(drv->cxrail_bhs);
		regval &= ~EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);
	}

	return regulator_disable(drv->vreg);
}

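/* Enable the AHB, AXI, and ROM clocks, unwinding in reverse order on error. */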
static int pil_mss_enable_clks(struct q6v5_data *drv)
{
	int ret;

	ret = clk_prepare_enable(drv->ahb_clk);
	if (ret)
		goto err_ahb_clk;
	ret = clk_prepare_enable(drv->axi_clk);
	if (ret)
		goto err_axi_clk;
	ret = clk_prepare_enable(drv->rom_clk);
	if (ret)
		goto err_rom_clk;

	return 0;

err_rom_clk:
	clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
	clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
	return ret;
}

static void pil_mss_disable_clks(struct q6v5_data *drv)
{
	clk_disable_unprepare(drv->rom_clk);
	clk_disable_unprepare(drv->axi_clk);
	clk_disable_unprepare(drv->ahb_clk);
}

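/*
 * Poll RMB_PBL_STATUS and then RMB_MBA_STATUS until each becomes non-zero,
 * and verify that the reported values indicate success.
 */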
static int wait_for_mba_ready(struct q6v5_data *drv)
{
	struct device *dev = drv->desc.dev;
	struct mba_data *mba = platform_get_drvdata(to_platform_device(dev));
	int ret;
	u32 status;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(mba->rmb_base + RMB_PBL_STATUS, status,
		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "PBL boot timed out\n");
		return ret;
	}
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(mba->rmb_base + RMB_MBA_STATUS, status,
		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "MBA boot timed out\n");
		return ret;
	}
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}

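/*
 * Halt the Q6, modem, and NC AXI ports, shut the Q6 down, then assert the
 * subsystem reset and remove the clock and regulator votes.
 */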
static int pil_mss_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);

	/*
	 * If the shutdown function is called before the reset function, clocks
	 * and power will not be enabled yet. Enable them here so that register
	 * writes performed during the shutdown succeed.
	 */
	if (drv->is_booted == false) {
		pil_mss_power_up(drv);
		pil_mss_enable_clks(drv);
	}
	pil_q6v5_shutdown(pil);

	pil_mss_disable_clks(drv);

	writel_relaxed(1, drv->restart_reg);

	/*
	 * Access to the cx_rail_bhs is restricted until after the gcc_mss
	 * reset is asserted once the PBL starts executing.
	 */
	pil_mss_power_down(drv);

	drv->is_booted = false;

	return 0;
}

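/*
 * Power up the subsystem, release its reset, program the boot address
 * (RMB_MBA_IMAGE when self-authenticating, QDSP6SS_RST_EVB otherwise),
 * and bring the Q6 out of reset.
 */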
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct platform_device *pdev = to_platform_device(pil->dev);
	struct mba_data *mba = platform_get_drvdata(pdev);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	int ret;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	/* Deassert reset to subsystem and wait for propagation */
	writel_relaxed(0, drv->restart_reg);
	mb();
	udelay(2);

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Program Image Address */
	if (mba->self_auth) {
		writel_relaxed(start_addr, mba->rmb_base + RMB_MBA_IMAGE);
		/* Ensure write to RMB base occurs before reset is released. */
		mb();
	} else {
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (mba->self_auth) {
		ret = wait_for_mba_ready(drv);
		if (ret)
			goto err_auth;
	}

	drv->is_booted = true;

	return 0;

err_auth:
	pil_q6v5_shutdown(pil);
err_q6v5_reset:
	pil_mss_disable_clks(drv);
err_clks:
	pil_mss_power_down(drv);
err_power:
	return ret;
}

static int pil_q6v5_mss_make_proxy_votes(struct pil_desc *pil)
{
	int ret;
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	ret = regulator_set_voltage(drv->vreg_mx, VDD_MSS_UV, MAX_VDD_MX_UV);
	if (ret) {
		dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
		return ret;
	}

	ret = regulator_enable(drv->vreg_mx);
	if (ret) {
		dev_err(pil->dev, "Failed to enable vreg_mx\n");
		regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
		return ret;
	}

	ret = pil_q6v5_make_proxy_votes(pil);
	if (ret) {
		regulator_disable(drv->vreg_mx);
		regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
	}

	return ret;
}

static void pil_q6v5_mss_remove_proxy_votes(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	pil_q6v5_remove_proxy_votes(pil);
	regulator_disable(drv->vreg_mx);
	regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
}

static struct pil_reset_ops pil_mss_ops = {
	.proxy_vote = pil_q6v5_mss_make_proxy_votes,
	.proxy_unvote = pil_q6v5_mss_remove_proxy_votes,
	.auth_and_reset = pil_mss_reset,
	.shutdown = pil_mss_shutdown,
};

static int pil_mba_make_proxy_votes(struct pil_desc *pil)
{
	int ret;
	struct mba_data *drv = dev_get_drvdata(pil->dev);

	ret = clk_prepare_enable(drv->q6->xo);
	if (ret) {
		dev_err(pil->dev, "Failed to enable XO\n");
		return ret;
	}
	return 0;
}

static void pil_mba_remove_proxy_votes(struct pil_desc *pil)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	clk_disable_unprepare(drv->q6->xo);
}

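/*
 * Copy the image metadata into a DMA-coherent buffer and hand its physical
 * address to the MBA for authentication of the ELF headers.
 */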
static int pil_mba_init_image(struct pil_desc *pil,
			      const u8 *metadata, size_t size)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;

	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_coherent(pil->dev, size, &mdata_phys,
					GFP_KERNEL);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		return -ENOMEM;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	drv->img_length = 0;

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_coherent(pil->dev, size, mdata_virt, mdata_phys);

	return ret;
}

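/*
 * Report each newly loaded segment to the MBA by advancing the code length
 * counter; the first call also programs the code start address.
 */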
static int pil_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
			       size_t size)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	s32 status;

	/* Begin image authentication */
	if (drv->img_length == 0) {
		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
	}
	/* Increment length counter */
	drv->img_length += size;
	writel_relaxed(drv->img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
	if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d\n", status);
		return -EINVAL;
	}

	return 0;
}

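/* Wait for the MBA to authenticate the entire image or report an error. */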
static int pil_mba_auth(struct pil_desc *pil)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	int ret;
	s32 status;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_AUTH_COMPLETE || status < 0,
			50, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	return ret;
}

static struct pil_reset_ops pil_mba_ops = {
	.init_image = pil_mba_init_image,
	.proxy_vote = pil_mba_make_proxy_votes,
	.proxy_unvote = pil_mba_remove_proxy_votes,
	.verify_blob = pil_mba_verify_blob,
	.auth_and_reset = pil_mba_auth,
};

#define subsys_to_drv(d) container_of(d, struct mba_data, subsys_desc)

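/* Read the modem's failure reason out of SMEM, log it, then clear it. */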
static void log_modem_sfr(void)
{
	u32 size;
	char *smem_reason, reason[MAX_SSR_REASON_LEN];

	smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
	if (!smem_reason || !size) {
		pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
		return;
	}
	if (!smem_reason[0]) {
		pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
		return;
	}

	strlcpy(reason, smem_reason, min(size, sizeof(reason)));
	pr_err("modem subsystem failure reason: %s.\n", reason);

	smem_reason[0] = '\0';
	wmb();
}

static void restart_modem(struct mba_data *drv)
{
	log_modem_sfr();
	drv->ignore_errors = true;
	subsystem_restart_dev(drv->subsys);
}

static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
{
	struct mba_data *drv = dev_id;

	/* Ignore if we're the one that set the force stop GPIO */
	if (drv->crash_shutdown)
		return IRQ_HANDLED;

	pr_err("Fatal error on the modem.\n");
	restart_modem(drv);
	return IRQ_HANDLED;
}

static int modem_shutdown(const struct subsys_desc *subsys)
{
	struct mba_data *drv = subsys_to_drv(subsys);

	if (subsys->is_not_loadable)
		return 0;
	pil_shutdown(&drv->desc);
	pil_shutdown(&drv->q6->desc);
	return 0;
}

static int modem_powerup(const struct subsys_desc *subsys)
{
	struct mba_data *drv = subsys_to_drv(subsys);
	int ret;

	if (subsys->is_not_loadable)
		return 0;
	/*
	 * At this time, the modem is shutdown. Therefore this function cannot
	 * run concurrently with either the watchdog bite error handler or the
	 * SMSM callback, making it safe to unset the flag below.
	 */
	drv->ignore_errors = false;
	ret = pil_boot(&drv->q6->desc);
	if (ret)
		return ret;
	ret = pil_boot(&drv->desc);
	if (ret)
		pil_shutdown(&drv->q6->desc);
	return ret;
}

static void modem_crash_shutdown(const struct subsys_desc *subsys)
{
	struct mba_data *drv = subsys_to_drv(subsys);
	drv->crash_shutdown = true;
	gpio_set_value(drv->force_stop_gpio, 1);
}

static struct ramdump_segment smem_segments[] = {
	{0x0FA00000, 0x0FC00000 - 0x0FA00000},
};

static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
	struct mba_data *drv = subsys_to_drv(subsys);
	int ret;

	if (!enable)
		return 0;

	ret = pil_boot(&drv->q6->desc);
	if (ret)
		return ret;

	ret = pil_do_ramdump(&drv->desc, drv->ramdump_dev);
	if (ret < 0) {
		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
		goto out;
	}

	ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
			ARRAY_SIZE(smem_segments));
	if (ret < 0) {
		pr_err("Unable to dump smem memory (rc = %d).\n", ret);
		goto out;
	}

out:
	pil_shutdown(&drv->q6->desc);
	return ret;
}

static int adsp_state_notifier_fn(struct notifier_block *this,
				unsigned long code, void *ss_handle)
{
	int ret;
	ret = sysmon_send_event(SYSMON_SS_MODEM, "adsp", code);
	if (ret < 0)
		pr_err("%s: sysmon_send_event failed (%d).", __func__, ret);
	return NOTIFY_DONE;
}

static struct notifier_block adsp_state_notifier_block = {
	.notifier_call = adsp_state_notifier_fn,
};

static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
{
	struct mba_data *drv = dev_id;
	if (drv->ignore_errors)
		return IRQ_HANDLED;
	pr_err("Watchdog bite received from modem software!\n");
	restart_modem(drv);
	return IRQ_HANDLED;
}

static int mss_start(const struct subsys_desc *desc)
{
	int ret;
	struct mba_data *drv = subsys_to_drv(desc);

	if (desc->is_not_loadable)
		return 0;

	ret = pil_boot(&drv->q6->desc);
	if (ret)
		return ret;
	ret = pil_boot(&drv->desc);
	if (ret) {
		pil_shutdown(&drv->q6->desc);
		/*
		 * We know now that the unvote interrupt is not coming.
		 * Remove the proxy votes immediately.
		 */
		if (drv->q6->desc.proxy_unvote_irq)
			pil_q6v5_mss_remove_proxy_votes(&drv->q6->desc);
	}
	return ret;
}

static void mss_stop(const struct subsys_desc *desc)
{
	struct mba_data *drv = subsys_to_drv(desc);

	if (desc->is_not_loadable)
		return;

	pil_shutdown(&drv->desc);
	pil_shutdown(&drv->q6->desc);
}

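/*
 * Register the modem with the subsystem restart framework, create the
 * ramdump devices, and hook up the watchdog-bite and err-fatal IRQ
 * handlers along with the ADSP state notifier.
 */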
static int __devinit pil_subsys_init(struct mba_data *drv,
					struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	drv->subsys_desc.name = "modem";
	drv->subsys_desc.dev = &pdev->dev;
	drv->subsys_desc.owner = THIS_MODULE;
	drv->subsys_desc.shutdown = modem_shutdown;
	drv->subsys_desc.powerup = modem_powerup;
	drv->subsys_desc.ramdump = modem_ramdump;
	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
	drv->subsys_desc.start = mss_start;
	drv->subsys_desc.stop = mss_stop;

	ret = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-err-ready", 0);
	if (ret < 0)
		return ret;

	ret = gpio_to_irq(ret);
	if (ret < 0)
		return ret;

	drv->subsys_desc.err_ready_irq = ret;

	drv->subsys = subsys_register(&drv->subsys_desc);
	if (IS_ERR(drv->subsys)) {
		ret = PTR_ERR(drv->subsys);
		goto err_subsys;
	}

	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
	if (!drv->ramdump_dev) {
		pr_err("%s: Unable to create a modem ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto err_ramdump;
	}

	drv->smem_ramdump_dev = create_ramdump_device("smem-modem", &pdev->dev);
	if (!drv->smem_ramdump_dev) {
		pr_err("%s: Unable to create an smem ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto err_ramdump_smem;
	}

	ret = devm_request_irq(&pdev->dev, irq, modem_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "modem_wdog", drv);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to request watchdog IRQ.\n");
		goto err_irq;
	}

	ret = devm_request_irq(&pdev->dev, drv->err_fatal_irq,
			modem_err_fatal_intr_handler,
			IRQF_TRIGGER_RISING, "pil-mss", drv);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to register SMP2P err fatal handler!\n");
		goto err_irq;
	}

	drv->adsp_state_notifier = subsys_notif_register_notifier("adsp",
					&adsp_state_notifier_block);
	if (IS_ERR(drv->adsp_state_notifier)) {
		ret = PTR_ERR(drv->adsp_state_notifier);
		dev_err(&pdev->dev, "%s: Registration with the SSR notification driver failed (%d)",
			__func__, ret);
		goto err_irq;
	}

	return 0;

err_irq:
	destroy_ramdump_device(drv->smem_ramdump_dev);
err_ramdump_smem:
	destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
	subsys_unregister(drv->subsys);
err_subsys:
	return ret;
}

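/*
 * Set up the loadable modem from the device tree: map the RMB and restart
 * registers, acquire the regulators and clocks, and register the Q6 and
 * MBA PIL descriptors.
 */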
static int __devinit pil_mss_loadable_init(struct mba_data *drv,
					struct platform_device *pdev)
{
	struct q6v5_data *q6;
	struct pil_desc *q6_desc, *mba_desc;
	struct resource *res;
	int ret;

	int clk_ready = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-proxy-unvote", 0);
	if (clk_ready < 0)
		return clk_ready;

	clk_ready = gpio_to_irq(clk_ready);
	if (clk_ready < 0)
		return clk_ready;

	q6 = pil_q6v5_init(pdev);
	if (IS_ERR(q6))
		return PTR_ERR(q6);
	drv->q6 = q6;

	q6_desc = &q6->desc;
	q6_desc->ops = &pil_mss_ops;
	q6_desc->owner = THIS_MODULE;
	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
	q6_desc->proxy_unvote_irq = clk_ready;

	drv->self_auth = of_property_read_bool(pdev->dev.of_node,
			"qcom,pil-self-auth");
	if (drv->self_auth) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"rmb_base");
		drv->rmb_base = devm_request_and_ioremap(&pdev->dev, res);
		if (!drv->rmb_base)
			return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
	q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
	if (!q6->restart_reg)
		return -ENOMEM;

	q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
	if (IS_ERR(q6->vreg))
		return PTR_ERR(q6->vreg);

	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
	if (IS_ERR(q6->vreg_mx))
		return PTR_ERR(q6->vreg_mx);

	ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV, MAX_VDD_MSS_UV);
	if (ret)
		dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");

	ret = regulator_set_optimum_mode(q6->vreg, 100000);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"cxrail_bhs_reg");
	if (res)
		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));

	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(q6->ahb_clk))
		return PTR_ERR(q6->ahb_clk);

	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(q6->axi_clk))
		return PTR_ERR(q6->axi_clk);

	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(q6->rom_clk))
		return PTR_ERR(q6->rom_clk);

	ret = pil_desc_init(q6_desc);
	if (ret)
		return ret;

	mba_desc = &drv->desc;
	mba_desc->name = "modem";
	mba_desc->dev = &pdev->dev;
	mba_desc->ops = &pil_mba_ops;
	mba_desc->owner = THIS_MODULE;
	mba_desc->proxy_timeout = PROXY_TIMEOUT_MS;
	mba_desc->proxy_unvote_irq = clk_ready;

	ret = pil_desc_init(mba_desc);
	if (ret)
		goto err_mba_desc;

	return 0;

err_mba_desc:
	pil_desc_release(q6_desc);
	return ret;
}

static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
{
	struct mba_data *drv;
	int ret, err_fatal_gpio, is_not_loadable;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	platform_set_drvdata(pdev, drv);

	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
					"qcom,is-not-loadable");
	if (is_not_loadable) {
		drv->subsys_desc.is_not_loadable = 1;
	} else {
		ret = pil_mss_loadable_init(drv, pdev);
		if (ret)
			return ret;
	}

	/* Get the IRQ from the GPIO for registering inbound handler */
	err_fatal_gpio = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-err-fatal", 0);
	if (err_fatal_gpio < 0)
		return err_fatal_gpio;

	drv->err_fatal_irq = gpio_to_irq(err_fatal_gpio);
	if (drv->err_fatal_irq < 0)
		return drv->err_fatal_irq;

	/* Get the GPIO pin for writing the outbound bits: add more as needed */
	drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-force-stop", 0);
	if (drv->force_stop_gpio < 0)
		return drv->force_stop_gpio;

	return pil_subsys_init(drv, pdev);
}

static int __devexit pil_mss_driver_exit(struct platform_device *pdev)
{
	struct mba_data *drv = platform_get_drvdata(pdev);

	subsys_notif_unregister_notifier(drv->adsp_state_notifier,
					&adsp_state_notifier_block);
	subsys_unregister(drv->subsys);
	destroy_ramdump_device(drv->smem_ramdump_dev);
	destroy_ramdump_device(drv->ramdump_dev);
	pil_desc_release(&drv->desc);
	pil_desc_release(&drv->q6->desc);
	return 0;
}

static struct of_device_id mss_match_table[] = {
	{ .compatible = "qcom,pil-q6v5-mss" },
	{}
};

static struct platform_driver pil_mss_driver = {
	.probe = pil_mss_driver_probe,
	.remove = __devexit_p(pil_mss_driver_exit),
	.driver = {
		.name = "pil-q6v5-mss",
		.of_match_table = mss_match_table,
		.owner = THIS_MODULE,
	},
};

static int __init pil_mss_init(void)
{
	return platform_driver_register(&pil_mss_driver);
}
module_init(pil_mss_init);

static void __exit pil_mss_exit(void)
{
	platform_driver_unregister(&pil_mss_driver);
}
module_exit(pil_mss_exit);

MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
MODULE_LICENSE("GPL v2");