/*
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>

#include <mach/subsystem_restart.h>
#include <mach/clk.h>
#include <mach/msm_smsm.h>

#include "peripheral-loader.h"
#include "pil-q6v5.h"
#include "ramdump.h"

/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB 0x010

/* AXI Halting Registers */
#define MSS_Q6_HALT_BASE 0x180
#define MSS_MODEM_HALT_BASE 0x200
#define MSS_NC_HALT_BASE 0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS 0x1
#define STATUS_XPU_UNLOCKED 0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE 0x00
#define RMB_PBL_STATUS 0x04
#define RMB_MBA_COMMAND 0x08
#define RMB_MBA_STATUS 0x0C
#define RMB_PMI_META_DATA 0x10
#define RMB_PMI_CODE_START 0x14
#define RMB_PMI_CODE_LENGTH 0x18

#define PROXY_TIMEOUT_MS 10000
#define POLL_INTERVAL_US 50

#define CMD_META_DATA_READY 0x1
#define CMD_LOAD_READY 0x2

#define STATUS_META_DATA_AUTH_SUCCESS 0x3
#define STATUS_AUTH_COMPLETE 0x4

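/*
 * Summary of the PBL/MBA handshake implemented below: the MBA image address
 * is written to RMB_MBA_IMAGE before the Q6 is brought out of reset,
 * RMB_PBL_STATUS and then RMB_MBA_STATUS are polled until they report
 * success, metadata authentication is requested with CMD_META_DATA_READY,
 * segment loading is announced with CMD_LOAD_READY plus updates to
 * RMB_PMI_CODE_START/RMB_PMI_CODE_LENGTH, and RMB_MBA_STATUS is finally
 * polled for STATUS_AUTH_COMPLETE. A negative status value at any point
 * indicates an error reported by the MBA.
 */
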
#define MAX_SSR_REASON_LEN 81U

struct mba_data {
        void __iomem *metadata_base;
        void __iomem *rmb_base;
        void __iomem *io_clamp_reg;
        unsigned long metadata_phys;
        struct pil_desc desc;
        struct subsys_device *subsys;
        struct subsys_desc subsys_desc;
        u32 img_length;
        struct q6v5_data *q6;
        int self_auth;
        void *ramdump_dev;
        void *smem_ramdump_dev;
        bool crash_shutdown;
        bool ignore_errors;
};

static int pbl_mba_boot_timeout_ms = 100;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);

static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);

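/*
 * Both timeouts above are module parameters. As an illustrative example only
 * (the exact parameter prefix depends on how this file is built, and dashes
 * and underscores are interchangeable on the kernel command line), the
 * PBL/MBA boot timeout could be raised with something like:
 *
 *      pil-q6v5-mss.pbl_mba_boot_timeout_ms=500
 *
 * Since the parameters are declared with S_IWUSR, they can also be changed
 * at runtime through the matching files under /sys/module/.../parameters/.
 */
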
static int pil_mss_power_up(struct q6v5_data *drv)
{
        int ret;
        struct device *dev = drv->desc.dev;

        ret = regulator_enable(drv->vreg);
        if (ret)
                dev_err(dev, "Failed to enable regulator.\n");

        return ret;
}

static int pil_mss_power_down(struct q6v5_data *drv)
{
        return regulator_disable(drv->vreg);
}

static int pil_mss_enable_clks(struct q6v5_data *drv)
{
        int ret;

        ret = clk_prepare_enable(drv->ahb_clk);
        if (ret)
                goto err_ahb_clk;
        ret = clk_prepare_enable(drv->axi_clk);
        if (ret)
                goto err_axi_clk;
        ret = clk_prepare_enable(drv->rom_clk);
        if (ret)
                goto err_rom_clk;

        return 0;

err_rom_clk:
        clk_disable_unprepare(drv->axi_clk);
err_axi_clk:
        clk_disable_unprepare(drv->ahb_clk);
err_ahb_clk:
        return ret;
}

static void pil_mss_disable_clks(struct q6v5_data *drv)
{
        clk_disable_unprepare(drv->rom_clk);
        clk_disable_unprepare(drv->axi_clk);
        clk_disable_unprepare(drv->ahb_clk);
}

static int wait_for_mba_ready(struct q6v5_data *drv)
{
        struct device *dev = drv->desc.dev;
        struct mba_data *mba = platform_get_drvdata(to_platform_device(dev));
        int ret;
        u32 status;

        /* Wait for PBL completion. */
        ret = readl_poll_timeout(mba->rmb_base + RMB_PBL_STATUS, status,
                status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
        if (ret) {
                dev_err(dev, "PBL boot timed out\n");
                return ret;
        }
        if (status != STATUS_PBL_SUCCESS) {
                dev_err(dev, "PBL returned unexpected status %d\n", status);
                return -EINVAL;
        }

        /* Wait for MBA completion. */
        ret = readl_poll_timeout(mba->rmb_base + RMB_MBA_STATUS, status,
                status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
        if (ret) {
                dev_err(dev, "MBA boot timed out\n");
                return ret;
        }
        if (status != STATUS_XPU_UNLOCKED &&
            status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
                dev_err(dev, "MBA returned unexpected status %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int pil_mss_shutdown(struct pil_desc *pil)
{
        struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
        pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);

        /*
         * If the shutdown function is called before the reset function, clocks
         * and power will not be enabled yet. Enable them here so that register
         * writes performed during the shutdown succeed.
         */
        if (drv->is_booted == false) {
                pil_mss_power_up(drv);
                pil_mss_enable_clks(drv);
        }
        pil_q6v5_shutdown(pil);

        pil_mss_disable_clks(drv);
        pil_mss_power_down(drv);

        writel_relaxed(1, drv->restart_reg);

        drv->is_booted = false;

        return 0;
}

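/*
 * Boot sequence for the modem subsystem: deassert the subsystem reset,
 * enable the MSS regulator and clocks, program the boot address (into the
 * RMB for self-authenticating images, or directly into the QDSP6 reset
 * vector register otherwise), release the Hexagon core from reset, and,
 * when self-authentication is enabled, wait for the PBL/MBA handshake to
 * complete.
 */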
static int pil_mss_reset(struct pil_desc *pil)
{
        struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
        struct platform_device *pdev = to_platform_device(pil->dev);
        struct mba_data *mba = platform_get_drvdata(pdev);
        unsigned long start_addr = pil_get_entry_addr(pil);
        int ret;

        /* Deassert reset to subsystem and wait for propagation */
        writel_relaxed(0, drv->restart_reg);
        mb();
        udelay(2);

        /*
         * Bring subsystem out of reset and enable required
         * regulators and clocks.
         */
        ret = pil_mss_power_up(drv);
        if (ret)
                goto err_power;

        ret = pil_mss_enable_clks(drv);
        if (ret)
                goto err_clks;

        /* Program Image Address */
        if (mba->self_auth) {
                writel_relaxed(start_addr, mba->rmb_base + RMB_MBA_IMAGE);
                /* Ensure write to RMB base occurs before reset is released. */
                mb();
        } else {
                writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
                                drv->reg_base + QDSP6SS_RST_EVB);
        }

        ret = pil_q6v5_reset(pil);
        if (ret)
                goto err_q6v5_reset;

        /* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
        if (mba->self_auth) {
                ret = wait_for_mba_ready(drv);
                if (ret)
                        goto err_auth;
        }

        drv->is_booted = true;

        return 0;

err_auth:
        pil_q6v5_shutdown(pil);
err_q6v5_reset:
        pil_mss_disable_clks(drv);
err_clks:
        pil_mss_power_down(drv);
err_power:
        return ret;
}

static struct pil_reset_ops pil_mss_ops = {
        .proxy_vote = pil_q6v5_make_proxy_votes,
        .proxy_unvote = pil_q6v5_remove_proxy_votes,
        .auth_and_reset = pil_mss_reset,
        .shutdown = pil_mss_shutdown,
};

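/*
 * This driver registers two PIL descriptors: the q6v5 descriptor above uses
 * pil_mss_ops to manage power, clocks and the Hexagon reset, while the
 * "modem" descriptor below uses pil_mba_ops to hand image metadata and
 * segment addresses to the MBA firmware over the RMB registers so it can
 * authenticate the modem image.
 */
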
static int pil_mba_make_proxy_votes(struct pil_desc *pil)
{
        int ret;
        struct mba_data *drv = dev_get_drvdata(pil->dev);

        ret = clk_prepare_enable(drv->q6->xo);
        if (ret) {
                dev_err(pil->dev, "Failed to enable XO\n");
                return ret;
        }
        return 0;
}

static void pil_mba_remove_proxy_votes(struct pil_desc *pil)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        clk_disable_unprepare(drv->q6->xo);
}

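/*
 * Hand the image metadata supplied by the PIL core to the MBA for
 * authentication: copy it into the shared buffer, publish its physical
 * address through RMB_PMI_META_DATA, issue CMD_META_DATA_READY, and poll
 * RMB_MBA_STATUS until the MBA reports success or an error.
 */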
static int pil_mba_init_image(struct pil_desc *pil,
                              const u8 *metadata, size_t size)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        s32 status;
        int ret;

        /* Copy metadata to assigned shared buffer location */
        memcpy(drv->metadata_base, metadata, size);

        /* Initialize length counter to 0 */
        writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
        drv->img_length = 0;

        /* Pass address of meta-data to the MBA and perform authentication */
        writel_relaxed(drv->metadata_phys, drv->rmb_base + RMB_PMI_META_DATA);
        writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
        ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
                status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
                POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
        if (ret) {
                dev_err(pil->dev, "MBA authentication of headers timed out\n");
        } else if (status < 0) {
                dev_err(pil->dev, "MBA returned error %d for headers\n",
                                status);
                ret = -EINVAL;
        }

        return ret;
}

static int pil_mba_verify_blob(struct pil_desc *pil, u32 phy_addr,
                               size_t size)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        s32 status;

        /* Begin image authentication */
        if (drv->img_length == 0) {
                writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
                writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
        }
        /* Increment length counter */
        drv->img_length += size;
        writel_relaxed(drv->img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

        status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
        if (status < 0) {
                dev_err(pil->dev, "MBA returned error %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int pil_mba_auth(struct pil_desc *pil)
{
        struct mba_data *drv = dev_get_drvdata(pil->dev);
        int ret;
        s32 status;

        /* Wait for all segments to be authenticated or an error to occur */
        ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
                status == STATUS_AUTH_COMPLETE || status < 0,
                50, modem_auth_timeout_ms * 1000);
        if (ret) {
                dev_err(pil->dev, "MBA authentication of image timed out\n");
        } else if (status < 0) {
                dev_err(pil->dev, "MBA returned error %d for image\n", status);
                ret = -EINVAL;
        }

        return ret;
}

static struct pil_reset_ops pil_mba_ops = {
        .init_image = pil_mba_init_image,
        .proxy_vote = pil_mba_make_proxy_votes,
        .proxy_unvote = pil_mba_remove_proxy_votes,
        .verify_blob = pil_mba_verify_blob,
        .auth_and_reset = pil_mba_auth,
};

#define subsys_to_drv(d) container_of(d, struct mba_data, subsys_desc)

static void log_modem_sfr(void)
{
        u32 size;
        char *smem_reason, reason[MAX_SSR_REASON_LEN];

        smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
        if (!smem_reason || !size) {
                pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
                return;
        }
        if (!smem_reason[0]) {
                pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
                return;
        }

        strlcpy(reason, smem_reason, min(size, sizeof(reason)));
        pr_err("modem subsystem failure reason: %s.\n", reason);

        smem_reason[0] = '\0';
        wmb();
}

static void restart_modem(struct mba_data *drv)
{
        log_modem_sfr();
        drv->ignore_errors = true;
        subsystem_restart_dev(drv->subsys);
}

static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
{
        struct mba_data *drv = data;

        /* Ignore if we're the one that set SMSM_RESET */
        if (drv->crash_shutdown)
                return;

        if (new_state & SMSM_RESET) {
                pr_err("Probable fatal error on the modem.\n");
                restart_modem(drv);
        }
}

static int modem_shutdown(const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);

        /* MBA doesn't support shutdown */
        pil_shutdown(&drv->q6->desc);
        return 0;
}

static int modem_powerup(const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);
        int ret;
        /*
         * At this time, the modem is shutdown. Therefore this function cannot
         * run concurrently with either the watchdog bite error handler or the
         * SMSM callback, making it safe to unset the flag below.
         */
        drv->ignore_errors = false;
        ret = pil_boot(&drv->q6->desc);
        if (ret)
                return ret;
        ret = pil_boot(&drv->desc);
        if (ret)
                pil_shutdown(&drv->q6->desc);
        return ret;
}

static void modem_crash_shutdown(const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);
        drv->crash_shutdown = true;
        smsm_reset_modem(SMSM_RESET);
}

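/*
 * Physical memory regions captured when a post-crash ramdump is requested:
 * the modem firmware region and the shared memory (SMEM) region. The
 * address ranges below are hard-coded for this target's memory map.
 */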
static struct ramdump_segment modem_segments[] = {
        {0x08400000, 0x0D100000 - 0x08400000},
};

static struct ramdump_segment smem_segments[] = {
        {0x0FA00000, 0x0FC00000 - 0x0FA00000},
};

static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
        struct mba_data *drv = subsys_to_drv(subsys);
        int ret;

        if (!enable)
                return 0;

        ret = pil_boot(&drv->q6->desc);
        if (ret)
                return ret;

        ret = do_ramdump(drv->ramdump_dev, modem_segments,
                        ARRAY_SIZE(modem_segments));
        if (ret < 0) {
                pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
                goto out;
        }

        ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
                        ARRAY_SIZE(smem_segments));
        if (ret < 0) {
                pr_err("Unable to dump smem memory (rc = %d).\n", ret);
                goto out;
        }

out:
        pil_shutdown(&drv->q6->desc);
        return ret;
}

static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
{
        struct mba_data *drv = dev_id;
        if (drv->ignore_errors)
                return IRQ_HANDLED;
        pr_err("Watchdog bite received from modem software!\n");
        restart_modem(drv);
        return IRQ_HANDLED;
}

static int mss_start(const struct subsys_desc *desc)
{
        int ret;
        struct mba_data *drv = subsys_to_drv(desc);

        ret = pil_boot(&drv->q6->desc);
        if (ret)
                return ret;
        ret = pil_boot(&drv->desc);
        if (ret)
                pil_shutdown(&drv->q6->desc);
        return ret;
}

static void mss_stop(const struct subsys_desc *desc)
{
        struct mba_data *drv = subsys_to_drv(desc);
        /* MBA doesn't support shutdown */
        pil_shutdown(&drv->q6->desc);
}

static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
{
        struct mba_data *drv;
        struct q6v5_data *q6;
        struct pil_desc *q6_desc, *mba_desc;
        struct resource *res;
        int ret, irq;

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
        platform_set_drvdata(pdev, drv);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        q6 = pil_q6v5_init(pdev);
        if (IS_ERR(q6))
                return PTR_ERR(q6);
        drv->q6 = q6;

        q6_desc = &q6->desc;
        q6_desc->ops = &pil_mss_ops;
        q6_desc->owner = THIS_MODULE;
        q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;

        of_property_read_u32(pdev->dev.of_node, "qcom,pil-self-auth",
                             &drv->self_auth);
        if (drv->self_auth) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                "rmb_base");
                /* The RMB registers are required for self-authentication. */
                if (!res)
                        return -EINVAL;
                drv->rmb_base = devm_ioremap(&pdev->dev, res->start,
                                resource_size(res));
                if (!drv->rmb_base)
                        return -ENOMEM;
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                "metadata_base");
                if (res) {
                        drv->metadata_base = devm_ioremap(&pdev->dev,
                                        res->start, resource_size(res));
                        if (!drv->metadata_base)
                                return -ENOMEM;
                        drv->metadata_phys = res->start;
                }
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
        if (!res)
                return -EINVAL;
        q6->restart_reg = devm_ioremap(&pdev->dev, res->start,
                        resource_size(res));
        if (!q6->restart_reg)
                return -ENOMEM;

        q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
        if (IS_ERR(q6->vreg))
                return PTR_ERR(q6->vreg);

        ret = regulator_set_voltage(q6->vreg, 1050000, 1050000);
        if (ret)
                dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");

        ret = regulator_set_optimum_mode(q6->vreg, 100000);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
                return ret;
        }

        q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(q6->ahb_clk))
                return PTR_ERR(q6->ahb_clk);

        q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(q6->axi_clk))
                return PTR_ERR(q6->axi_clk);

        q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
        if (IS_ERR(q6->rom_clk))
                return PTR_ERR(q6->rom_clk);

        ret = pil_desc_init(q6_desc);
        if (ret)
                return ret;

        mba_desc = &drv->desc;
        mba_desc->name = "modem";
        mba_desc->dev = &pdev->dev;
        mba_desc->ops = &pil_mba_ops;
        mba_desc->owner = THIS_MODULE;
        mba_desc->proxy_timeout = PROXY_TIMEOUT_MS;

        ret = pil_desc_init(mba_desc);
        if (ret)
                goto err_mba_desc;

        drv->subsys_desc.name = "modem";
        drv->subsys_desc.dev = &pdev->dev;
        drv->subsys_desc.owner = THIS_MODULE;
        drv->subsys_desc.shutdown = modem_shutdown;
        drv->subsys_desc.powerup = modem_powerup;
        drv->subsys_desc.ramdump = modem_ramdump;
        drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
        drv->subsys_desc.start = mss_start;
        drv->subsys_desc.stop = mss_stop;

        drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
        if (!drv->ramdump_dev) {
                pr_err("%s: Unable to create a modem ramdump device.\n",
                        __func__);
                ret = -ENOMEM;
                goto err_ramdump;
        }

        drv->smem_ramdump_dev = create_ramdump_device("smem-modem", &pdev->dev);
        if (!drv->smem_ramdump_dev) {
                pr_err("%s: Unable to create an smem ramdump device.\n",
                        __func__);
                ret = -ENOMEM;
                goto err_ramdump_smem;
        }

        drv->subsys = subsys_register(&drv->subsys_desc);
        if (IS_ERR(drv->subsys)) {
                ret = PTR_ERR(drv->subsys);
                goto err_subsys;
        }

        ret = devm_request_irq(&pdev->dev, irq, modem_wdog_bite_irq,
                        IRQF_TRIGGER_RISING, "modem_wdog", drv);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to request watchdog IRQ.\n");
                goto err_irq;
        }

        ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
                        smsm_state_cb, drv);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to register SMSM callback!\n");
                goto err_irq;
        }

        return 0;

err_irq:
        subsys_unregister(drv->subsys);
err_subsys:
        destroy_ramdump_device(drv->smem_ramdump_dev);
err_ramdump_smem:
        destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
        pil_desc_release(mba_desc);
err_mba_desc:
        pil_desc_release(q6_desc);
        return ret;
}

static int __devexit pil_mss_driver_exit(struct platform_device *pdev)
{
        struct mba_data *drv = platform_get_drvdata(pdev);
        smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_RESET,
                        smsm_state_cb, drv);
        subsys_unregister(drv->subsys);
        destroy_ramdump_device(drv->smem_ramdump_dev);
        destroy_ramdump_device(drv->ramdump_dev);
        pil_desc_release(&drv->desc);
        pil_desc_release(&drv->q6->desc);
        return 0;
}

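/*
 * Resources looked up by pil_mss_driver_probe(): the first platform IRQ
 * (modem watchdog bite), a "vdd_mss" regulator supply, "iface_clk",
 * "bus_clk" and "mem_clk" clocks, a "restart_reg" memory resource, and,
 * when the optional "qcom,pil-self-auth" property is non-zero, "rmb_base"
 * and (optionally) "metadata_base" memory resources. A purely illustrative
 * device tree fragment (addresses, interrupts and any additional resources
 * required by the shared q6v5 code are omitted because they are target
 * specific and not defined in this file) might look like:
 *
 *      modem-pil {
 *              compatible = "qcom,pil-q6v5-mss";
 *              reg-names = "rmb_base", "metadata_base", "restart_reg";
 *              qcom,pil-self-auth = <1>;
 *      };
 */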
static struct of_device_id mss_match_table[] = {
        { .compatible = "qcom,pil-q6v5-mss" },
        {}
};

static struct platform_driver pil_mss_driver = {
        .probe = pil_mss_driver_probe,
        .remove = __devexit_p(pil_mss_driver_exit),
        .driver = {
                .name = "pil-q6v5-mss",
                .of_match_table = mss_match_table,
                .owner = THIS_MODULE,
        },
};

static int __init pil_mss_init(void)
{
        return platform_driver_register(&pil_mss_driver);
}
module_init(pil_mss_init);

static void __exit pil_mss_exit(void)
{
        platform_driver_unregister(&pil_mss_driver);
}
module_exit(pil_mss_exit);

MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
MODULE_LICENSE("GPL v2");