blob: 979458ed927e80e515e66b1c2ef1a266e58a5e80 [file] [log] [blame]
Matt Wagantall4e2599e2012-03-21 22:31:35 -07001/*
Matt Wagantalla72d03d2013-02-26 21:13:14 -08002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Matt Wagantall4e2599e2012-03-21 22:31:35 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/io.h>
18#include <linux/iopoll.h>
19#include <linux/ioport.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070020#include <linux/delay.h>
21#include <linux/sched.h>
22#include <linux/clk.h>
23#include <linux/err.h>
24#include <linux/of.h>
25#include <linux/regulator/consumer.h>
Stephen Boyd3da4fd02012-07-06 10:00:12 -070026#include <linux/interrupt.h>
Seemanta Dutta6e58f542013-03-04 19:28:16 -080027#include <linux/of_gpio.h>
Matt Wagantall724b2bb2013-03-18 14:54:06 -070028#include <linux/dma-mapping.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070029
Stephen Boyd3da4fd02012-07-06 10:00:12 -070030#include <mach/subsystem_restart.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070031#include <mach/clk.h>
Stephen Boyd3da4fd02012-07-06 10:00:12 -070032#include <mach/msm_smsm.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070033#include <mach/ramdump.h>
Matt Wagantall4e2599e2012-03-21 22:31:35 -070034
35#include "peripheral-loader.h"
36#include "pil-q6v5.h"
Vikram Mulukutla896d0582012-10-17 16:57:46 -070037#include "sysmon.h"
Matt Wagantall4e2599e2012-03-21 22:31:35 -070038
/* Q6 Register Offsets */
#define QDSP6SS_RST_EVB			0x010

/* AXI Halting Registers (offsets from drv->axi_halt_base) */
#define MSS_Q6_HALT_BASE		0x180
#define MSS_MODEM_HALT_BASE		0x200
#define MSS_NC_HALT_BASE		0x280

/* RMB Status Register Values */
#define STATUS_PBL_SUCCESS		0x1
#define STATUS_XPU_UNLOCKED		0x1
#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2

/* PBL/MBA interface registers (offsets from rmb_base) */
#define RMB_MBA_IMAGE			0x00
#define RMB_PBL_STATUS			0x04
#define RMB_MBA_COMMAND			0x08
#define RMB_MBA_STATUS			0x0C
#define RMB_PMI_META_DATA		0x10
#define RMB_PMI_CODE_START		0x14
#define RMB_PMI_CODE_LENGTH		0x18

/* Regulator voltage bounds, in microvolts */
#define VDD_MSS_UV			1050000
#define MAX_VDD_MSS_UV			1150000
#define MAX_VDD_MX_UV			1150000

#define PROXY_TIMEOUT_MS		10000
#define POLL_INTERVAL_US		50

/* Values written to RMB_MBA_COMMAND */
#define CMD_META_DATA_READY		0x1
#define CMD_LOAD_READY			0x2

/* Values read back from RMB_MBA_STATUS */
#define STATUS_META_DATA_AUTH_SUCCESS	0x3
#define STATUS_AUTH_COMPLETE		0x4

#define MAX_SSR_REASON_LEN		81U

/* External BHS (block head switch) control bits for the CX rail */
#define EXTERNAL_BHS_ON			BIT(0)
#define EXTERNAL_BHS_STATUS		BIT(4)
#define BHS_TIMEOUT_US			50
80
/* Driver state for the MBA-authenticated modem image and its subsystem. */
struct mba_data {
	void __iomem *rmb_base;		/* PBL/MBA message-buffer registers */
	void __iomem *io_clamp_reg;
	struct pil_desc desc;		/* PIL descriptor for the modem image */
	struct subsys_device *subsys;	/* handle from subsys_register() */
	struct subsys_desc subsys_desc;
	void *adsp_state_notifier;	/* ADSP SSR notifier handle */
	u32 img_length;			/* bytes streamed to the MBA so far */
	struct q6v5_data *q6;		/* underlying Q6 processor state */
	bool self_auth;			/* image is authenticated by the MBA */
	void *ramdump_dev;		/* "modem" ramdump device */
	void *smem_ramdump_dev;		/* "smem-modem" ramdump device */
	bool crash_shutdown;		/* we initiated the stop via GPIO */
	bool ignore_errors;		/* suppress error IRQs during restart */
	int err_fatal_irq;		/* SMP2P error-fatal interrupt */
	int force_stop_gpio;		/* GPIO used to force-stop the modem */
};
98
/* Timeout for the PBL and MBA boot handshakes; writable at runtime. */
static int pbl_mba_boot_timeout_ms = 100;
module_param(pbl_mba_boot_timeout_ms, int, S_IRUGO | S_IWUSR);

/* Timeout for MBA metadata/image authentication; writable at runtime. */
static int modem_auth_timeout_ms = 10000;
module_param(modem_auth_timeout_ms, int, S_IRUGO | S_IWUSR);
104
/*
 * Power up the modem rails: enable the optional modem supply regulator
 * and, when a CX-rail block-head-switch register is present, switch the
 * external BHS on and poll for its status bit within BHS_TIMEOUT_US.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): when both vreg and cxrail_bhs exist, a regulator failure
 * is logged but `ret` is then overwritten by the BHS poll result —
 * presumably acceptable since the failure was already reported; confirm.
 */
static int pil_mss_power_up(struct q6v5_data *drv)
{
	int ret = 0;
	struct device *dev = drv->desc.dev;
	u32 regval;

	/* The modem supply is optional on some targets. */
	if (drv->vreg) {
		ret = regulator_enable(drv->vreg);
		if (ret)
			dev_err(dev, "Failed to enable modem regulator.\n");
	}

	if (drv->cxrail_bhs) {
		/* Set the BHS enable bit, then wait for the status bit. */
		regval = readl_relaxed(drv->cxrail_bhs);
		regval |= EXTERNAL_BHS_ON;
		writel_relaxed(regval, drv->cxrail_bhs);

		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
	}

	return ret;
}
128
Stephen Boyd3826cd42012-07-05 17:37:53 -0700129static int pil_mss_power_down(struct q6v5_data *drv)
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700130{
Patrick Daly11ca6af2013-03-03 17:07:28 -0800131 u32 regval;
132
133 if (drv->cxrail_bhs) {
134 regval = readl_relaxed(drv->cxrail_bhs);
135 regval &= ~EXTERNAL_BHS_ON;
136 writel_relaxed(regval, drv->cxrail_bhs);
137 }
138
Vikram Mulukutla44c87972013-04-30 21:25:03 -0700139 if (drv->vreg)
140 return regulator_disable(drv->vreg);
141
142 return 0;
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700143}
144
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700145static int pil_mss_enable_clks(struct q6v5_data *drv)
146{
147 int ret;
148
149 ret = clk_prepare_enable(drv->ahb_clk);
150 if (ret)
151 goto err_ahb_clk;
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700152 ret = clk_prepare_enable(drv->axi_clk);
153 if (ret)
154 goto err_axi_clk;
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700155 ret = clk_prepare_enable(drv->rom_clk);
156 if (ret)
157 goto err_rom_clk;
158
159 return 0;
160
161err_rom_clk:
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700162 clk_disable_unprepare(drv->axi_clk);
163err_axi_clk:
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700164 clk_disable_unprepare(drv->ahb_clk);
165err_ahb_clk:
166 return ret;
167}
168
169static void pil_mss_disable_clks(struct q6v5_data *drv)
170{
171 clk_disable_unprepare(drv->rom_clk);
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700172 clk_disable_unprepare(drv->axi_clk);
Matt Wagantall8c2246d2012-08-12 17:08:04 -0700173 clk_disable_unprepare(drv->ahb_clk);
174}
175
/*
 * Wait for the self-auth boot handshake to complete.
 *
 * Polls RMB_PBL_STATUS until the PBL reports a result, then
 * RMB_MBA_STATUS until the MBA does; both phases share the
 * pbl_mba_boot_timeout_ms budget. Any non-success status code is
 * treated as a fatal boot error.
 *
 * Returns 0 when the MBA is up, a poll-timeout errno, or -EINVAL on an
 * unexpected status value.
 */
static int wait_for_mba_ready(struct q6v5_data *drv)
{
	struct device *dev = drv->desc.dev;
	struct mba_data *mba = platform_get_drvdata(to_platform_device(dev));
	int ret;
	u32 status;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(mba->rmb_base + RMB_PBL_STATUS, status,
		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "PBL boot timed out\n");
		return ret;
	}
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(mba->rmb_base + RMB_MBA_STATUS, status,
		status != 0, POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "MBA boot timed out\n");
		return ret;
	}
	/* Either XPU status is acceptable here. */
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}
210
/*
 * Shut down the modem subsystem and assert its reset.
 *
 * AXI ports are halted first so no bus traffic is in flight, then the
 * Q6 is stopped, clocks dropped, the reset register asserted, and
 * finally the rails are powered down. The ordering is intentional.
 */
static int pil_mss_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_Q6_HALT_BASE);
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_MODEM_HALT_BASE);
	pil_q6v5_halt_axi_port(pil, drv->axi_halt_base + MSS_NC_HALT_BASE);

	/*
	 * If the shutdown function is called before the reset function, clocks
	 * and power will not be enabled yet. Enable them here so that register
	 * writes performed during the shutdown succeed.
	 */
	if (drv->is_booted == false) {
		pil_mss_power_up(drv);
		pil_mss_enable_clks(drv);
	}
	pil_q6v5_shutdown(pil);

	pil_mss_disable_clks(drv);

	/* Assert the subsystem reset. */
	writel_relaxed(1, drv->restart_reg);

	/*
	 * access to the cx_rail_bhs is restricted until after the gcc_mss
	 * reset is asserted once the PBL starts executing.
	 */
	pil_mss_power_down(drv);

	drv->is_booted = false;

	return 0;
}
244
/*
 * Bring the modem subsystem out of reset and boot it.
 *
 * Sequence: power rails -> deassert reset -> clocks -> program the boot
 * address -> release the Q6. For self-authenticating images the entry
 * address goes to the RMB and we then wait for the PBL/MBA hand-off;
 * otherwise the shifted entry address is written to QDSP6SS_RST_EVB.
 * Errors unwind in strict reverse order via the goto ladder.
 */
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct platform_device *pdev = to_platform_device(pil->dev);
	struct mba_data *mba = platform_get_drvdata(pdev);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	int ret;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	/* Deassert reset to subsystem and wait for propagation */
	writel_relaxed(0, drv->restart_reg);
	mb();
	udelay(2);

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Program Image Address */
	if (mba->self_auth) {
		writel_relaxed(start_addr, mba->rmb_base + RMB_MBA_IMAGE);
		/* Ensure write to RMB base occurs before reset is released. */
		mb();
	} else {
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (mba->self_auth) {
		ret = wait_for_mba_ready(drv);
		if (ret)
			goto err_auth;
	}

	drv->is_booted = true;

	return 0;

err_auth:
	pil_q6v5_shutdown(pil);
err_q6v5_reset:
	pil_mss_disable_clks(drv);
err_clks:
	pil_mss_power_down(drv);
err_power:
	return ret;
}
304
/*
 * Take the proxy votes the modem needs while booting: raise and enable
 * the MX rail, then take the common Q6 proxy votes. Undone by
 * pil_q6v5_mss_remove_proxy_votes().
 *
 * NOTE(review): the MX floor is requested with VDD_MSS_UV — presumably
 * both rails share the same floor on these targets; confirm against the
 * platform's regulator tables.
 */
static int pil_q6v5_mss_make_proxy_votes(struct pil_desc *pil)
{
	int ret;
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	ret = regulator_set_voltage(drv->vreg_mx, VDD_MSS_UV, MAX_VDD_MX_UV);
	if (ret) {
		dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
		return ret;
	}

	ret = regulator_enable(drv->vreg_mx);
	if (ret) {
		dev_err(pil->dev, "Failed to enable vreg_mx\n");
		/* Relax the voltage request we just made. */
		regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
		return ret;
	}

	ret = pil_q6v5_make_proxy_votes(pil);
	if (ret) {
		/* Roll back the MX vote if the common votes fail. */
		regulator_disable(drv->vreg_mx);
		regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
	}

	return ret;
}
331
332static void pil_q6v5_mss_remove_proxy_votes(struct pil_desc *pil)
333{
334 struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
335 pil_q6v5_remove_proxy_votes(pil);
336 regulator_disable(drv->vreg_mx);
337 regulator_set_voltage(drv->vreg_mx, 0, MAX_VDD_MX_UV);
338}
339
/* Reset ops for the Q6 processor image (direct, non-MBA boot path). */
static struct pil_reset_ops pil_mss_ops = {
	.proxy_vote = pil_q6v5_mss_make_proxy_votes,
	.proxy_unvote = pil_q6v5_mss_remove_proxy_votes,
	.auth_and_reset = pil_mss_reset,
	.shutdown = pil_mss_shutdown,
};
346
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700347static int pil_mba_make_proxy_votes(struct pil_desc *pil)
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700348{
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700349 int ret;
350 struct mba_data *drv = dev_get_drvdata(pil->dev);
351
352 ret = clk_prepare_enable(drv->q6->xo);
353 if (ret) {
354 dev_err(pil->dev, "Failed to enable XO\n");
355 return ret;
356 }
357 return 0;
358}
359
360static void pil_mba_remove_proxy_votes(struct pil_desc *pil)
361{
362 struct mba_data *drv = dev_get_drvdata(pil->dev);
363 clk_disable_unprepare(drv->q6->xo);
364}
365
/*
 * Hand the image metadata to the MBA for authentication.
 *
 * The metadata is copied into a DMA-coherent buffer (physically
 * contiguous, as the MBA requires), its physical address and the
 * CMD_META_DATA_READY command are written to the RMB, and we poll
 * RMB_MBA_STATUS for success or an error code. The running image length
 * counter is reset here; pil_mba_verify_blob() extends it per segment.
 *
 * Returns 0 on success, a poll-timeout errno, -EINVAL on an MBA error,
 * or -ENOMEM if the bounce buffer cannot be allocated.
 */
static int pil_mba_init_image(struct pil_desc *pil,
			const u8 *metadata, size_t size)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;

	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_coherent(pil->dev, size, &mdata_phys,
					GFP_KERNEL);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		return -ENOMEM;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	drv->img_length = 0;

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	/* The MBA has consumed (or rejected) the metadata; free the copy. */
	dma_free_coherent(pil->dev, size, mdata_virt, mdata_phys);

	return ret;
}
408
/*
 * Stream one segment of the modem image past the MBA authenticator.
 *
 * On the first blob (img_length == 0), program the image start address
 * and issue CMD_LOAD_READY; thereafter just extend the running length
 * counter in RMB_PMI_CODE_LENGTH. A negative value in RMB_MBA_STATUS
 * means the MBA rejected what it has seen so far.
 */
static int pil_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
			size_t size)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	s32 status;

	/* Begin image authentication */
	if (drv->img_length == 0) {
		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
	}
	/* Increment length counter */
	drv->img_length += size;
	writel_relaxed(drv->img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
	if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d\n", status);
		return -EINVAL;
	}

	return 0;
}
432
433static int pil_mba_auth(struct pil_desc *pil)
434{
435 struct mba_data *drv = dev_get_drvdata(pil->dev);
436 int ret;
437 s32 status;
438
439 /* Wait for all segments to be authenticated or an error to occur */
440 ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
441 status == STATUS_AUTH_COMPLETE || status < 0,
442 50, modem_auth_timeout_ms * 1000);
443 if (ret) {
444 dev_err(pil->dev, "MBA authentication of image timed out\n");
445 } else if (status < 0) {
446 dev_err(pil->dev, "MBA returned error %d for image\n", status);
447 ret = -EINVAL;
448 }
449
450 return ret;
451}
452
/* Reset ops for the modem image, authenticated at runtime by the MBA. */
static struct pil_reset_ops pil_mba_ops = {
	.init_image = pil_mba_init_image,
	.proxy_vote = pil_mba_make_proxy_votes,
	.proxy_unvote = pil_mba_remove_proxy_votes,
	.verify_blob = pil_mba_verify_blob,
	.auth_and_reset = pil_mba_auth,
};
460
/* Map an embedded subsys_desc back to its containing struct mba_data. */
#define subsys_to_drv(d) container_of(d, struct mba_data, subsys_desc)
462
/*
 * Fetch and log the modem's subsystem-failure-reason string from SMEM.
 *
 * The string lives in the SMEM_SSR_REASON_MSS0 entry. It is copied out
 * (bounded by MAX_SSR_REASON_LEN) and logged, then cleared in SMEM so a
 * stale reason is not reported on the next crash.
 */
static void log_modem_sfr(void)
{
	u32 size;
	char *smem_reason, reason[MAX_SSR_REASON_LEN];

	smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size);
	if (!smem_reason || !size) {
		pr_err("modem subsystem failure reason: (unknown, smem_get_entry failed).\n");
		return;
	}
	if (!smem_reason[0]) {
		pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
		return;
	}

	strlcpy(reason, smem_reason, min(size, sizeof(reason)));
	pr_err("modem subsystem failure reason: %s.\n", reason);

	/* Clear the reason; wmb() makes the write visible before we return. */
	smem_reason[0] = '\0';
	wmb();
}
484
485static void restart_modem(struct mba_data *drv)
486{
487 log_modem_sfr();
488 drv->ignore_errors = true;
489 subsystem_restart_dev(drv->subsys);
490}
491
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800492static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700493{
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800494 struct mba_data *drv = dev_id;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700495
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800496 /* Ignore if we're the one that set the force stop GPIO */
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700497 if (drv->crash_shutdown)
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800498 return IRQ_HANDLED;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700499
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800500 pr_err("Fatal error on the modem.\n");
501 restart_modem(drv);
502 return IRQ_HANDLED;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700503}
504
505static int modem_shutdown(const struct subsys_desc *subsys)
506{
Stephen Boyde83a0a22012-06-29 13:51:27 -0700507 struct mba_data *drv = subsys_to_drv(subsys);
508
Seemanta Duttaf9458c92013-05-08 19:53:29 -0700509 if (subsys->is_not_loadable)
Vikram Mulukutla1d958af2012-11-20 14:06:12 -0800510 return 0;
Matt Wagantalla72d03d2013-02-26 21:13:14 -0800511 pil_shutdown(&drv->desc);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700512 pil_shutdown(&drv->q6->desc);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700513 return 0;
514}
515
516static int modem_powerup(const struct subsys_desc *subsys)
517{
518 struct mba_data *drv = subsys_to_drv(subsys);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700519 int ret;
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800520
Seemanta Duttaf9458c92013-05-08 19:53:29 -0700521 if (subsys->is_not_loadable)
Vikram Mulukutla1d958af2012-11-20 14:06:12 -0800522 return 0;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700523 /*
524 * At this time, the modem is shutdown. Therefore this function cannot
525 * run concurrently with either the watchdog bite error handler or the
526 * SMSM callback, making it safe to unset the flag below.
527 */
528 drv->ignore_errors = false;
Stephen Boyde83a0a22012-06-29 13:51:27 -0700529 ret = pil_boot(&drv->q6->desc);
530 if (ret)
531 return ret;
532 ret = pil_boot(&drv->desc);
533 if (ret)
534 pil_shutdown(&drv->q6->desc);
535 return ret;
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700536}
537
538static void modem_crash_shutdown(const struct subsys_desc *subsys)
539{
540 struct mba_data *drv = subsys_to_drv(subsys);
541 drv->crash_shutdown = true;
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800542 gpio_set_value(drv->force_stop_gpio, 1);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700543}
544
/* Fixed SMEM region (0x0FA00000-0x0FC00000) captured with modem dumps. */
static struct ramdump_segment smem_segments[] = {
	{0x0FA00000, 0x0FC00000 - 0x0FA00000},
};
548
/*
 * Ramdump callback: capture modem firmware memory and the fixed SMEM
 * region. The Q6 is booted first so the memory is accessible and shut
 * down again afterwards regardless of dump success.
 *
 * @enable == 0 is a no-op. Returns the (negative) first failure, else
 * the result of the last dump.
 */
static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
	struct mba_data *drv = subsys_to_drv(subsys);
	int ret;

	if (!enable)
		return 0;

	ret = pil_boot(&drv->q6->desc);
	if (ret)
		return ret;

	ret = pil_do_ramdump(&drv->desc, drv->ramdump_dev);
	if (ret < 0) {
		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
		goto out;
	}

	ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
		ARRAY_SIZE(smem_segments));
	if (ret < 0) {
		pr_err("Unable to dump smem memory (rc = %d).\n", ret);
		goto out;
	}

out:
	/* Always take the Q6 back down; dumps are done either way. */
	pil_shutdown(&drv->q6->desc);
	return ret;
}
578
Vikram Mulukutla896d0582012-10-17 16:57:46 -0700579static int adsp_state_notifier_fn(struct notifier_block *this,
580 unsigned long code, void *ss_handle)
581{
582 int ret;
583 ret = sysmon_send_event(SYSMON_SS_MODEM, "adsp", code);
584 if (ret < 0)
585 pr_err("%s: sysmon_send_event failed (%d).", __func__, ret);
586 return NOTIFY_DONE;
587}
588
/* Forwards ADSP subsystem state changes to adsp_state_notifier_fn(). */
static struct notifier_block adsp_state_notifier_block = {
	.notifier_call = adsp_state_notifier_fn,
};
592
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700593static irqreturn_t modem_wdog_bite_irq(int irq, void *dev_id)
594{
595 struct mba_data *drv = dev_id;
596 if (drv->ignore_errors)
597 return IRQ_HANDLED;
598 pr_err("Watchdog bite received from modem software!\n");
599 restart_modem(drv);
600 return IRQ_HANDLED;
601}
602
/*
 * subsys "start" hook: boot the Q6, then the modem image.
 *
 * If the modem image fails to boot, the Q6 is shut down and — when a
 * proxy-unvote IRQ is configured — the proxy votes are dropped
 * immediately, since the unvote interrupt will never arrive.
 */
static int mss_start(const struct subsys_desc *desc)
{
	int ret;
	struct mba_data *drv = subsys_to_drv(desc);

	if (desc->is_not_loadable)
		return 0;

	ret = pil_boot(&drv->q6->desc);
	if (ret)
		return ret;
	ret = pil_boot(&drv->desc);
	if (ret) {
		pil_shutdown(&drv->q6->desc);
		/*
		 * We know now that the unvote interrupt is not coming.
		 * Remove the proxy votes immediately.
		 */
		if (drv->q6->desc.proxy_unvote_irq)
			pil_q6v5_mss_remove_proxy_votes(&drv->q6->desc);
	}
	return ret;
}
626
627static void mss_stop(const struct subsys_desc *desc)
628{
629 struct mba_data *drv = subsys_to_drv(desc);
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800630
Seemanta Duttaf9458c92013-05-08 19:53:29 -0700631 if (desc->is_not_loadable)
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800632 return;
633
Matt Wagantalla72d03d2013-02-26 21:13:14 -0800634 pil_shutdown(&drv->desc);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700635 pil_shutdown(&drv->q6->desc);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700636}
637
/*
 * Register the modem with the subsystem-restart framework and hook up
 * its error paths: watchdog-bite IRQ, SMP2P err-fatal IRQ, the two
 * ramdump devices, and the ADSP SSR state notifier.
 *
 * Returns 0 on success; unwinds completed registrations on failure.
 */
static int __devinit pil_subsys_init(struct mba_data *drv,
				struct platform_device *pdev)
{
	int irq, ret;

	/* Platform IRQ 0 is the modem watchdog bite. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	drv->subsys_desc.name = "modem";
	drv->subsys_desc.dev = &pdev->dev;
	drv->subsys_desc.owner = THIS_MODULE;
	drv->subsys_desc.shutdown = modem_shutdown;
	drv->subsys_desc.powerup = modem_powerup;
	drv->subsys_desc.ramdump = modem_ramdump;
	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
	drv->subsys_desc.start = mss_start;
	drv->subsys_desc.stop = mss_stop;

	/* Translate the "error ready" GPIO into the err_ready IRQ. */
	ret = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-err-ready", 0);
	if (ret < 0)
		return ret;

	ret = gpio_to_irq(ret);
	if (ret < 0)
		return ret;

	drv->subsys_desc.err_ready_irq = ret;

	drv->subsys = subsys_register(&drv->subsys_desc);
	if (IS_ERR(drv->subsys)) {
		ret = PTR_ERR(drv->subsys);
		goto err_subsys;
	}

	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
	if (!drv->ramdump_dev) {
		pr_err("%s: Unable to create a modem ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto err_ramdump;
	}

	drv->smem_ramdump_dev = create_ramdump_device("smem-modem", &pdev->dev);
	if (!drv->smem_ramdump_dev) {
		pr_err("%s: Unable to create an smem ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto err_ramdump_smem;
	}

	/* devm-managed IRQs need no explicit unwind below. */
	ret = devm_request_irq(&pdev->dev, irq, modem_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "modem_wdog", drv);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to request watchdog IRQ.\n");
		goto err_irq;
	}

	ret = devm_request_irq(&pdev->dev, drv->err_fatal_irq,
			modem_err_fatal_intr_handler,
			IRQF_TRIGGER_RISING, "pil-mss", drv);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to register SMP2P err fatal handler!\n");
		goto err_irq;
	}

	drv->adsp_state_notifier = subsys_notif_register_notifier("adsp",
		&adsp_state_notifier_block);
	if (IS_ERR(drv->adsp_state_notifier)) {
		ret = PTR_ERR(drv->adsp_state_notifier);
		dev_err(&pdev->dev, "%s: Registration with the SSR notification driver failed (%d)",
			__func__, ret);
		goto err_irq;
	}

	return 0;

err_irq:
	destroy_ramdump_device(drv->smem_ramdump_dev);
err_ramdump_smem:
	destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
	subsys_unregister(drv->subsys);
err_subsys:
	return ret;
}
725
/*
 * Set up the loadable-modem path: parse DT resources and initialize the
 * Q6 PIL descriptor (pil_mss_ops) plus the MBA descriptor (pil_mba_ops).
 * Both descriptors share the proxy-unvote IRQ derived from the
 * "qcom,gpio-proxy-unvote" GPIO.
 *
 * Resources obtained with devm_* are released automatically on failure.
 */
static int __devinit pil_mss_loadable_init(struct mba_data *drv,
		struct platform_device *pdev)
{
	struct q6v5_data *q6;
	struct pil_desc *q6_desc, *mba_desc;
	struct resource *res;
	struct property *prop;
	int ret;

	int clk_ready = of_get_named_gpio(pdev->dev.of_node,
			"qcom,gpio-proxy-unvote", 0);
	if (clk_ready < 0)
		return clk_ready;

	clk_ready = gpio_to_irq(clk_ready);
	if (clk_ready < 0)
		return clk_ready;

	q6 = pil_q6v5_init(pdev);
	if (IS_ERR(q6))
		return PTR_ERR(q6);
	drv->q6 = q6;

	q6_desc = &q6->desc;
	q6_desc->ops = &pil_mss_ops;
	q6_desc->owner = THIS_MODULE;
	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
	q6_desc->proxy_unvote_irq = clk_ready;

	/* Self-authenticating images need the RMB register window. */
	drv->self_auth = of_property_read_bool(pdev->dev.of_node,
		"qcom,pil-self-auth");
	if (drv->self_auth) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"rmb_base");
		drv->rmb_base = devm_request_and_ioremap(&pdev->dev, res);
		if (!drv->rmb_base)
			return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
	q6->restart_reg = devm_request_and_ioremap(&pdev->dev, res);
	if (!q6->restart_reg)
		return -ENOMEM;

	q6->vreg = NULL;

	/* vdd_mss is optional; configure it only when the DT provides it. */
	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
	if (prop) {
		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
		if (IS_ERR(q6->vreg))
			return PTR_ERR(q6->vreg);

		ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
			MAX_VDD_MSS_UV);
		if (ret)
			dev_err(&pdev->dev, "Failed to set vreg voltage.\n");

		ret = regulator_set_optimum_mode(q6->vreg, 100000);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to set vreg mode.\n");
			return ret;
		}
	}

	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
	if (IS_ERR(q6->vreg_mx))
		return PTR_ERR(q6->vreg_mx);

	/* Optional CX-rail block-head-switch control register. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"cxrail_bhs_reg");
	if (res)
		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));

	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(q6->ahb_clk))
		return PTR_ERR(q6->ahb_clk);

	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(q6->axi_clk))
		return PTR_ERR(q6->axi_clk);

	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(q6->rom_clk))
		return PTR_ERR(q6->rom_clk);

	ret = pil_desc_init(q6_desc);
	if (ret)
		return ret;

	mba_desc = &drv->desc;
	mba_desc->name = "modem";
	mba_desc->dev = &pdev->dev;
	mba_desc->ops = &pil_mba_ops;
	mba_desc->owner = THIS_MODULE;
	mba_desc->proxy_timeout = PROXY_TIMEOUT_MS;
	mba_desc->proxy_unvote_irq = clk_ready;

	ret = pil_desc_init(mba_desc);
	if (ret)
		goto err_mba_desc;

	return 0;

err_mba_desc:
	pil_desc_release(q6_desc);
	return ret;

}
835
836static int __devinit pil_mss_driver_probe(struct platform_device *pdev)
837{
838 struct mba_data *drv;
Seemanta Duttaf9458c92013-05-08 19:53:29 -0700839 int ret, err_fatal_gpio, is_not_loadable;
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800840
841 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
842 if (!drv)
843 return -ENOMEM;
844 platform_set_drvdata(pdev, drv);
845
Seemanta Duttaf9458c92013-05-08 19:53:29 -0700846 is_not_loadable = of_property_read_bool(pdev->dev.of_node,
847 "qcom,is-not-loadable");
848 if (is_not_loadable) {
849 drv->subsys_desc.is_not_loadable = 1;
850 } else {
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800851 ret = pil_mss_loadable_init(drv, pdev);
852 if (ret)
853 return ret;
854 }
855
Seemanta Dutta6e58f542013-03-04 19:28:16 -0800856 /* Get the IRQ from the GPIO for registering inbound handler */
857 err_fatal_gpio = of_get_named_gpio(pdev->dev.of_node,
858 "qcom,gpio-err-fatal", 0);
859 if (err_fatal_gpio < 0)
860 return err_fatal_gpio;
861
862 drv->err_fatal_irq = gpio_to_irq(err_fatal_gpio);
863 if (drv->err_fatal_irq < 0)
864 return drv->err_fatal_irq;
865
866 /* Get the GPIO pin for writing the outbound bits: add more as needed */
867 drv->force_stop_gpio = of_get_named_gpio(pdev->dev.of_node,
868 "qcom,gpio-force-stop", 0);
869 if (drv->force_stop_gpio < 0)
870 return drv->force_stop_gpio;
871
Vikram Mulukutla7dc2d4e2012-11-12 13:04:50 -0800872 return pil_subsys_init(drv, pdev);
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700873}
874
875static int __devexit pil_mss_driver_exit(struct platform_device *pdev)
876{
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700877 struct mba_data *drv = platform_get_drvdata(pdev);
Vikram Mulukutla896d0582012-10-17 16:57:46 -0700878
879 subsys_notif_unregister_notifier(drv->adsp_state_notifier,
880 &adsp_state_notifier_block);
Stephen Boyd3da4fd02012-07-06 10:00:12 -0700881 subsys_unregister(drv->subsys);
882 destroy_ramdump_device(drv->smem_ramdump_dev);
883 destroy_ramdump_device(drv->ramdump_dev);
Stephen Boyde83a0a22012-06-29 13:51:27 -0700884 pil_desc_release(&drv->desc);
885 pil_desc_release(&drv->q6->desc);
Matt Wagantall4e2599e2012-03-21 22:31:35 -0700886 return 0;
887}
888
/* Device-tree match table: binds this driver to "qcom,pil-q6v5-mss" nodes */
static struct of_device_id mss_match_table[] = {
	{ .compatible = "qcom,pil-q6v5-mss" },
	{}
};
893
/*
 * Platform driver definition: hooks up the probe/remove handlers above and
 * the OF match table so the core can bind this driver to matching devices.
 */
static struct platform_driver pil_mss_driver = {
	.probe = pil_mss_driver_probe,
	.remove = __devexit_p(pil_mss_driver_exit),
	.driver = {
		.name = "pil-q6v5-mss",
		.of_match_table = mss_match_table,
		.owner = THIS_MODULE,
	},
};
903
/* Module entry point: register the platform driver with the driver core */
static int __init pil_mss_init(void)
{
	return platform_driver_register(&pil_mss_driver);
}
module_init(pil_mss_init);
909
/* Module exit point: unregister the platform driver from the driver core */
static void __exit pil_mss_exit(void)
{
	platform_driver_unregister(&pil_mss_driver);
}
module_exit(pil_mss_exit);
915
916MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
917MODULE_LICENSE("GPL v2");