/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>

#include <mach/subsystem_restart.h>
#include <mach/clk.h>
#include <mach/msm_smsm.h>

#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "ramdump.h"
#include "sysmon.h"

/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB                 0x010

/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE                0x180
#define MSS_MODEM_HALT_BASE             0x200
#define MSS_NC_HALT_BASE                0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS              0x1
#define STATUS_XPU_UNLOCKED             0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED   0x2

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE                   0x00
#define RMB_PBL_STATUS                  0x04
#define RMB_MBA_COMMAND                 0x08
#define RMB_MBA_STATUS                  0x0C
#define RMB_PMI_META_DATA               0x10
#define RMB_PMI_CODE_START              0x14
#define RMB_PMI_CODE_LENGTH             0x18

#define VDD_MSS_UV                      1050000
#define MAX_VDD_MSS_UV                  1150000
#define MAX_VDD_MX_UV                   1150000

#define PROXY_TIMEOUT_MS                10000
#define POLL_INTERVAL_US                50

#define CMD_META_DATA_READY             0x1
#define CMD_LOAD_READY                  0x2

#define STATUS_META_DATA_AUTH_SUCCESS   0x3
#define STATUS_AUTH_COMPLETE            0x4

#define MAX_SSR_REASON_LEN              81U

/* External BHS */
#define EXTERNAL_BHS_ON                 BIT(0)
#define EXTERNAL_BHS_STATUS             BIT(4)
#define BHS_TIMEOUT_US                  50

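/*
 * Per-modem driver state: RMB and metadata mappings used to talk to the
 * Modem Boot Authenticator (MBA), PIL and subsystem-restart descriptors,
 * ramdump devices, and the error-handling flags used by the SSR handlers.
 */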
struct mba_data {
        void __iomem *metadata_base;
        void __iomem *rmb_base;
        void __iomem *io_clamp_reg;
        unsigned long metadata_phys;
        struct pil_desc desc;
        struct subsys_device *subsys;
        struct subsys_desc subsys_desc;
        void *adsp_state_notifier;
        u32 img_length;
        struct q6v5_data *q6;
        bool self_auth;
        void *ramdump_dev;
        void *smem_ramdump_dev;
        bool crash_shutdown;
        bool ignore_errors;
        int is_loadable;
        int err_fatal_irq;
        int force_stop_gpio;
};

static int pbl_mba_boot_timeout_ms = 100;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);

static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);

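/* Enable the modem supply and, when mapped, the external CX rail BHS. */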
static int pil_mss_power_up(struct q6v5_data *drv)
{
        int ret;
        struct device *dev = drv->desc.dev;
        u32 regval;

        ret = regulator_enable(drv->vreg);
        if (ret)
                dev_err(dev, "Failed to enable modem regulator.\n");

        if (drv->cxrail_bhs) {
                regval = readl_relaxed(drv->cxrail_bhs);
                regval |= EXTERNAL_BHS_ON;
                writel_relaxed(regval, drv->cxrail_bhs);

                ret = readl_poll_timeout(drv->cxrail_bhs, regval,
                        regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
        }

        return ret;
}

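/* Turn the external BHS off (if mapped) before dropping the modem supply. */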
static int pil_mss_power_down(struct q6v5_data *drv)
{
        u32 regval;

        if (drv->cxrail_bhs) {
                regval = readl_relaxed(drv->cxrail_bhs);
                regval &= ~EXTERNAL_BHS_ON;
                writel_relaxed(regval, drv->cxrail_bhs);
        }

        return regulator_disable(drv->vreg);
}

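/* Enable the AHB, AXI and ROM clocks, unwinding in reverse order on error. */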
static int pil_mss_enable_clks(struct q6v5_data *drv)
{
        int ret;

        ret = clk_prepare_enable(drv->ahb_clk);
        if (ret)
                goto err_ahb_clk;
        ret = clk_prepare_enable(drv->axi_clk);
        if (ret)
                goto err_axi_clk;
        ret = clk_prepare_enable(drv->rom_clk);
        if (ret)
                goto err_rom_clk;

        return 0;

err_rom_clk:
        clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
        clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
        return ret;
}

static void pil_mss_disable_clks(struct q6v5_data *drv)
{
        clk_disable_unprepare(drv->rom_clk);
        clk_disable_unprepare(drv->axi_clk);
        clk_disable_unprepare(drv->ahb_clk);
}

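/* Poll the RMB status registers until both the PBL and the MBA report back. */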
static int wait_for_mba_ready(struct q6v5_data *drv)
{
        struct device *dev = drv->desc.dev;
        struct mba_data *mba = platform_get_drvdata(to_platform_device(dev));
        int ret;
        u32 status;

        /* Wait for PBL completion. */
        ret = readl_poll_timeout(mba->rmb_base + RMB_PBL_STATUS, status,
                status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
        if (ret) {
                dev_err(dev, "PBL boot timed out\n");
                return ret;
        }
        if (status != STATUS_PBL_SUCCESS) {
                dev_err(dev, "PBL returned unexpected status %d\n", status);
                return -EINVAL;
        }

        /* Wait for MBA completion. */
        ret = readl_poll_timeout(mba->rmb_base + RMB_MBA_STATUS, status,
                status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
        if (ret) {
                dev_err(dev, "MBA boot timed out\n");
                return ret;
        }
        if (status != STATUS_XPU_UNLOCKED &&
            status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
                dev_err(dev, "MBA returned unexpected status %d\n", status);
                return -EINVAL;
        }

        return 0;
}

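/* Halt the Q6, modem and non-core AXI ports, then reset and power down. */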
static int pil_mss_shutdown(struct pil_desc *pil)
{
        struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);

        /*
         * If the shutdown function is called before the reset function, clocks
         * and power will not be enabled yet. Enable them here so that register
         * writes performed during the shutdown succeed.
         */
        if (!drv->is_booted) {
                pil_mss_power_up(drv);
                pil_mss_enable_clks(drv);
        }
        pil_q6v5_shutdown(pil);

        pil_mss_disable_clks(drv);

        writel_relaxed(1, drv->restart_reg);

        /*
         * Access to the cx_rail_bhs is restricted until after the gcc_mss
         * reset is asserted once the PBL starts executing.
         */
        pil_mss_power_down(drv);

        drv->is_booted = false;

        return 0;
}

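/*
 * Power the subsystem up, program the boot address (the RMB image register
 * when self-authenticating, the QDSP6SS reset vector otherwise) and bring
 * the Q6 out of reset.
 */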
static int pil_mss_reset(struct pil_desc *pil)
{
        struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
        struct platform_device *pdev = to_platform_device(pil->dev);
        struct mba_data *mba = platform_get_drvdata(pdev);
        unsigned long start_addr = pil_get_entry_addr(pil);
        int ret;

        /*
         * Bring subsystem out of reset and enable required
         * regulators and clocks.
         */
        ret = pil_mss_power_up(drv);
        if (ret)
                goto err_power;

        /* Deassert reset to subsystem and wait for propagation */
        writel_relaxed(0, drv->restart_reg);
        mb();
        udelay(2);

        ret = pil_mss_enable_clks(drv);
        if (ret)
                goto err_clks;

        /* Program Image Address */
        if (mba->self_auth) {
                writel_relaxed(start_addr, mba->rmb_base + RMB_MBA_IMAGE);
                /* Ensure write to RMB base occurs before reset is released. */
                mb();
        } else {
                writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
                                drv->reg_base + QDSP6SS_RST_EVB);
        }

        ret = pil_q6v5_reset(pil);
        if (ret)
                goto err_q6v5_reset;

        /* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
        if (mba->self_auth) {
                ret = wait_for_mba_ready(drv);
                if (ret)
                        goto err_auth;
        }

        drv->is_booted = true;

        return 0;

err_auth:
        pil_q6v5_shutdown(pil);
err_q6v5_reset:
        pil_mss_disable_clks(drv);
err_clks:
        pil_mss_power_down(drv);
err_power:
        return ret;
}

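/* Vote for the MX rail on top of the common Q6v5 proxy votes. */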
static int pil_q6v5_mss_make_proxy_votes(struct pil_desc *pil)
{
        int ret;
        struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

        ret = regulator_set_voltage(drv->vreg_mx, VDD_MSS_UV, MAX_VDD_MX_UV);
        if (ret) {
                dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
                return ret;
        }

        ret = regulator_enable(drv->vreg_mx);
        if (ret) {
                dev_err(pil->dev, "Failed to enable vreg_mx\n");
                regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
                return ret;
        }

        ret = pil_q6v5_make_proxy_votes(pil);
        if (ret) {
                regulator_disable(drv->vreg_mx);
                regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
        }

        return ret;
}

static void pil_q6v5_mss_remove_proxy_votes(struct pil_desc *pil)
{
        struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
        pil_q6v5_remove_proxy_votes(pil);
        regulator_disable(drv->vreg_mx);
        regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
}

static struct pil_reset_ops pil_mss_ops = {
        .proxy_vote = pil_q6v5_mss_make_proxy_votes,
        .proxy_unvote = pil_q6v5_mss_remove_proxy_votes,
        .auth_and_reset = pil_mss_reset,
        .shutdown = pil_mss_shutdown,
};

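/* The MBA image only needs the XO clock held as a proxy vote. */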
static int pil_mba_make_proxy_votes(struct pil_desc *pil)
{
        int ret;
        struct mba_data *drv = dev_get_drvdata(pil->dev);

        ret = clk_prepare_enable(drv->q6->xo);
        if (ret) {
                dev_err(pil->dev, "Failed to enable XO\n");
                return ret;
        }
        return 0;
}

static void pil_mba_remove_proxy_votes(struct pil_desc *pil)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        clk_disable_unprepare(drv->q6->xo);
}

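/*
 * Hand the image metadata to the MBA over the RMB registers and wait for it
 * to authenticate the headers.
 */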
static int pil_mba_init_image(struct pil_desc *pil,
                              const u8 *metadata, size_t size)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        s32 status;
        int ret;

        /* Copy metadata to assigned shared buffer location */
        memcpy(drv->metadata_base, metadata, size);

        /* Initialize length counter to 0 */
        writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
        drv->img_length = 0;

        /* Pass address of meta-data to the MBA and perform authentication */
        writel_relaxed(drv->metadata_phys, drv->rmb_base + RMB_PMI_META_DATA);
        writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
        ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
                status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
                POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
        if (ret) {
                dev_err(pil->dev, "MBA authentication of headers timed out\n");
        } else if (status < 0) {
                dev_err(pil->dev, "MBA returned error %d for headers\n",
                                status);
                ret = -EINVAL;
        }

        return ret;
}

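/*
 * Report each newly loaded segment to the MBA by advancing the code length
 * register; the MBA authenticates the image incrementally as it grows.
 */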
static int pil_mba_verify_blob(struct pil_desc *pil, u32 phy_addr,
                               size_t size)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        s32 status;

        /* Begin image authentication */
        if (drv->img_length == 0) {
                writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
                writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
        }
        /* Increment length counter */
        drv->img_length += size;
        writel_relaxed(drv->img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

        status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
        if (status < 0) {
                dev_err(pil->dev, "MBA returned error %d\n", status);
                return -EINVAL;
        }

        return 0;
}

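/* Wait for the MBA to finish authenticating the complete image. */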
static int pil_mba_auth(struct pil_desc *pil)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        int ret;
        s32 status;

        /* Wait for all segments to be authenticated or an error to occur */
        ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
                        status == STATUS_AUTH_COMPLETE || status < 0,
                        50, modem_auth_timeout_ms * 1000);
        if (ret) {
                dev_err(pil->dev, "MBA authentication of image timed out\n");
        } else if (status < 0) {
                dev_err(pil->dev, "MBA returned error %d for image\n", status);
                ret = -EINVAL;
        }

        return ret;
}

static struct pil_reset_ops pil_mba_ops = {
        .init_image = pil_mba_init_image,
        .proxy_vote = pil_mba_make_proxy_votes,
        .proxy_unvote = pil_mba_remove_proxy_votes,
        .verify_blob = pil_mba_verify_blob,
        .auth_and_reset = pil_mba_auth,
};

#define subsys_to_drv(d) container_of(d, struct mba_data, subsys_desc)

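/* Log the subsystem failure reason (SFR) the modem publishes in SMEM. */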
static void log_modem_sfr(void)
{
        u32 size;
        char *smem_reason, reason[MAX_SSR_REASON_LEN];

        smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
        if (!smem_reason || !size) {
                pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
                return;
        }
        if (!smem_reason[0]) {
                pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
                return;
        }

        strlcpy(reason, smem_reason, min(size, sizeof(reason)));
        pr_err("modem subsystem failure reason: %s.\n", reason);

        smem_reason[0] = '\0';
        wmb();
}

static void restart_modem(struct mba_data *drv)
{
        log_modem_sfr();
        drv->ignore_errors = true;
        subsystem_restart_dev(drv->subsys);
}

static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
{
        struct mba_data *drv = dev_id;

        /* Ignore if we're the one that set the force stop GPIO */
        if (drv->crash_shutdown)
                return IRQ_HANDLED;

        pr_err("Fatal error on the modem.\n");
        restart_modem(drv);
        return IRQ_HANDLED;
}

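/* Subsystem-restart hooks: shut down and power up the modem through PIL. */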
static int modem_shutdown(const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);

        if (!drv->is_loadable)
                return 0;
        pil_shutdown(&drv->desc);
        pil_shutdown(&drv->q6->desc);
        return 0;
}

static int modem_powerup(const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);
        int ret;

        if (!drv->is_loadable)
                return 0;
        /*
         * At this time, the modem is shut down. Therefore this function cannot
         * run concurrently with either the watchdog bite error handler or the
         * SMSM callback, making it safe to unset the flag below.
         */
        drv->ignore_errors = false;
        ret = pil_boot(&drv->q6->desc);
        if (ret)
                return ret;
        ret = pil_boot(&drv->desc);
        if (ret)
                pil_shutdown(&drv->q6->desc);
        return ret;
}

static void modem_crash_shutdown(const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);
        drv->crash_shutdown = true;
        gpio_set_value(drv->force_stop_gpio, 1);
}

static struct ramdump_segment smem_segments[] = {
        {0x0FA00000, 0x0FC00000 - 0x0FA00000},
};

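/* Collect modem firmware and SMEM ramdumps; the Q6 is booted for the dump. */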
static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);
        int ret;

        if (!enable)
                return 0;

        ret = pil_boot(&drv->q6->desc);
        if (ret)
                return ret;

        ret = pil_do_ramdump(&drv->desc, drv->ramdump_dev);
        if (ret < 0) {
                pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
                goto out;
        }

        ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
                ARRAY_SIZE(smem_segments));
        if (ret < 0) {
                pr_err("Unable to dump smem memory (rc = %d).\n", ret);
                goto out;
        }

out:
        pil_shutdown(&drv->q6->desc);
        return ret;
}

static int adsp_state_notifier_fn(struct notifier_block *this,
                                unsigned long code, void *ss_handle)
{
        int ret;

        ret = sysmon_send_event(SYSMON_SS_MODEM, "adsp", code);
        if (ret < 0)
                pr_err("%s: sysmon_send_event failed (%d).\n", __func__, ret);
        return NOTIFY_DONE;
}

static struct notifier_block adsp_state_notifier_block = {
        .notifier_call = adsp_state_notifier_fn,
};

static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
{
        struct mba_data *drv = dev_id;

        if (drv->ignore_errors)
                return IRQ_HANDLED;
        pr_err("Watchdog bite received from modem software!\n");
        restart_modem(drv);
        return IRQ_HANDLED;
}

static int mss_start(const struct subsys_desc *desc)
{
        int ret;
        struct mba_data *drv = subsys_to_drv(desc);

        if (!drv->is_loadable)
                return 0;

        ret = pil_boot(&drv->q6->desc);
        if (ret)
                return ret;
        ret = pil_boot(&drv->desc);
        if (ret)
                pil_shutdown(&drv->q6->desc);
        return ret;
}

static void mss_stop(const struct subsys_desc *desc)
{
        struct mba_data *drv = subsys_to_drv(desc);

        if (!drv->is_loadable)
                return;

        pil_shutdown(&drv->desc);
        pil_shutdown(&drv->q6->desc);
}

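/*
 * Register the modem with the subsystem-restart framework and set up the
 * ramdump devices, watchdog-bite IRQ and err-fatal IRQ handlers.
 */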
static int __devinit pil_subsys_init(struct mba_data *drv,
                                        struct platform_device *pdev)
{
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        drv->subsys_desc.name = "modem";
        drv->subsys_desc.dev = &pdev->dev;
        drv->subsys_desc.owner = THIS_MODULE;
        drv->subsys_desc.shutdown = modem_shutdown;
        drv->subsys_desc.powerup = modem_powerup;
        drv->subsys_desc.ramdump = modem_ramdump;
        drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
        drv->subsys_desc.start = mss_start;
        drv->subsys_desc.stop = mss_stop;

        drv->subsys = subsys_register(&drv->subsys_desc);
        if (IS_ERR(drv->subsys)) {
                ret = PTR_ERR(drv->subsys);
                goto err_subsys;
        }

        drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
        if (!drv->ramdump_dev) {
                pr_err("%s: Unable to create a modem ramdump device.\n",
                        __func__);
                ret = -ENOMEM;
                goto err_ramdump;
        }

        drv->smem_ramdump_dev = create_ramdump_device("smem-modem", &pdev->dev);
        if (!drv->smem_ramdump_dev) {
                pr_err("%s: Unable to create an smem ramdump device.\n",
                        __func__);
                ret = -ENOMEM;
                goto err_ramdump_smem;
        }

        ret = devm_request_irq(&pdev->dev, irq, modem_wdog_bite_irq,
                        IRQF_TRIGGER_RISING, "modem_wdog", drv);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to request watchdog IRQ.\n");
                goto err_irq;
        }

        ret = devm_request_irq(&pdev->dev, drv->err_fatal_irq,
                        modem_err_fatal_intr_handler,
                        IRQF_TRIGGER_RISING, "pil-mss", drv);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to register SMP2P err fatal handler!\n");
                goto err_irq;
        }

        drv->adsp_state_notifier = subsys_notif_register_notifier("adsp",
                                                &adsp_state_notifier_block);
        if (IS_ERR(drv->adsp_state_notifier)) {
                ret = PTR_ERR(drv->adsp_state_notifier);
                dev_err(&pdev->dev, "%s: Registration with the SSR notification driver failed (%d)\n",
                        __func__, ret);
                goto err_irq;
        }

        return 0;

err_irq:
        destroy_ramdump_device(drv->smem_ramdump_dev);
err_ramdump_smem:
        destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
        subsys_unregister(drv->subsys);
err_subsys:
        return ret;
}

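/*
 * Map registers and acquire regulators and clocks described in the device
 * tree, then register the Q6 and MBA PIL descriptors.
 */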
static int __devinit pil_mss_loadable_init(struct mba_data *drv,
                                        struct platform_device *pdev)
{
        struct q6v5_data *q6;
        struct pil_desc *q6_desc, *mba_desc;
        struct resource *res;
        int ret;

        q6 = pil_q6v5_init(pdev);
        if (IS_ERR(q6))
                return PTR_ERR(q6);
        drv->q6 = q6;

        q6_desc = &q6->desc;
        q6_desc->ops = &pil_mss_ops;
        q6_desc->owner = THIS_MODULE;
        q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;

        drv->self_auth = of_property_read_bool(pdev->dev.of_node,
                                                "qcom,pil-self-auth");
        if (drv->self_auth) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                "rmb_base");
                drv->rmb_base = devm_request_and_ioremap(&pdev->dev, res);
                if (!drv->rmb_base)
                        return -ENOMEM;
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                "metadata_base");
                if (res) {
                        drv->metadata_base = devm_ioremap(&pdev->dev,
                                        res->start, resource_size(res));
                        if (!drv->metadata_base)
                                return -ENOMEM;
                        drv->metadata_phys = res->start;
                }
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
        q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
        if (!q6->restart_reg)
                return -ENOMEM;

        q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
        if (IS_ERR(q6->vreg))
                return PTR_ERR(q6->vreg);

        q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
        if (IS_ERR(q6->vreg_mx))
                return PTR_ERR(q6->vreg_mx);

        ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV, MAX_VDD_MSS_UV);
        if (ret)
                dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");

        ret = regulator_set_optimum_mode(q6->vreg, 100000);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
                return ret;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                "cxrail_bhs_reg");
        if (res)
                q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));

        q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(q6->ahb_clk))
                return PTR_ERR(q6->ahb_clk);

        q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(q6->axi_clk))
                return PTR_ERR(q6->axi_clk);

        q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
        if (IS_ERR(q6->rom_clk))
                return PTR_ERR(q6->rom_clk);

        ret = pil_desc_init(q6_desc);
        if (ret)
                return ret;

        mba_desc = &drv->desc;
        mba_desc->name = "modem";
        mba_desc->dev = &pdev->dev;
        mba_desc->ops = &pil_mba_ops;
        mba_desc->owner = THIS_MODULE;
        mba_desc->proxy_timeout = PROXY_TIMEOUT_MS;

        ret = pil_desc_init(mba_desc);
        if (ret)
                goto err_mba_desc;

        return 0;

err_mba_desc:
        pil_desc_release(q6_desc);
        return ret;
}

static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
{
        struct mba_data *drv;
        int ret, err_fatal_gpio;

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
        platform_set_drvdata(pdev, drv);

        drv->is_loadable = of_property_read_bool(pdev->dev.of_node,
                                                        "qcom,is-loadable");
        if (drv->is_loadable) {
                ret = pil_mss_loadable_init(drv, pdev);
                if (ret)
                        return ret;
        }

        /* Get the IRQ from the GPIO for registering inbound handler */
        err_fatal_gpio = of_get_named_gpio(pdev->dev.of_node,
                        "qcom,gpio-err-fatal", 0);
        if (err_fatal_gpio < 0)
                return err_fatal_gpio;

        drv->err_fatal_irq = gpio_to_irq(err_fatal_gpio);
        if (drv->err_fatal_irq < 0)
                return drv->err_fatal_irq;

        /* Get the GPIO pin for writing the outbound bits: add more as needed */
        drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
                        "qcom,gpio-force-stop", 0);
        if (drv->force_stop_gpio < 0)
                return drv->force_stop_gpio;

        return pil_subsys_init(drv, pdev);
}

static int __devexit pil_mss_driver_exit(struct platform_device *pdev)
{
        struct mba_data *drv = platform_get_drvdata(pdev);

        subsys_notif_unregister_notifier(drv->adsp_state_notifier,
                                        &adsp_state_notifier_block);
        subsys_unregister(drv->subsys);
        destroy_ramdump_device(drv->smem_ramdump_dev);
        destroy_ramdump_device(drv->ramdump_dev);
        pil_desc_release(&drv->desc);
        pil_desc_release(&drv->q6->desc);
        return 0;
}

static struct of_device_id mss_match_table[] = {
        { .compatible = "qcom,pil-q6v5-mss" },
        {}
};

static struct platform_driver pil_mss_driver = {
        .probe = pil_mss_driver_probe,
        .remove = __devexit_p(pil_mss_driver_exit),
        .driver = {
                .name = "pil-q6v5-mss",
                .of_match_table = mss_match_table,
                .owner = THIS_MODULE,
        },
};

static int __init pil_mss_init(void)
{
        return platform_driver_register(&pil_mss_driver);
}
module_init(pil_mss_init);

static void __exit pil_mss_exit(void)
{
        platform_driver_unregister(&pil_mss_driver);
}
module_exit(pil_mss_exit);

MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
MODULE_LICENSE("GPL v2");