/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "remoteproc_internal.h"
#include "qcom_mdt_loader.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **active_clk_names;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct clk *active_clks[8];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;
	struct completion stop_done;
	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;
};

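/*
 * Look up the regulators described by @reg_res and record their voltage and
 * load requests in @regs. Returns the number of regulators found or a
 * negative errno.
 */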
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

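/*
 * Apply the recorded voltage and load votes and enable each regulator,
 * rolling the votes back on failure.
 */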
static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

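/* Prepare and enable @count clocks, disabling any already enabled on error. */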
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

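/*
 * The MBA firmware carries no resource table, so hand the remoteproc core an
 * empty dummy table instead.
 */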
static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
						  const struct firmware *fw,
						  int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}

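/* "Load" the MBA firmware by copying it into the reserved MBA region. */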
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = q6v5_find_rsc_table,
	.load = q6v5_load,
};

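/*
 * Poll the PBL status register until it reports a result or @ms milliseconds
 * have passed. Returns the register value or -ETIMEDOUT.
 */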
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

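/*
 * Poll the MBA status register until it reports @status (or any non-zero
 * value when @status is 0), an error, or the @ms timeout expires.
 */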
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

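/*
 * Power up and release the Hexagon core from reset: assert the resets, turn
 * on the power block and memories, remove the IO clamp, ungate the core clock
 * and let the core run, then wait for the PBL to report its boot status.
 */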
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;

	/* Assert resets, stop core */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

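/*
 * Request a halt of the AXI port behind @offset in the halt register block
 * and wait (up to HALT_ACK_TIMEOUT_MS) for it to acknowledge and go idle.
 */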
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

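/*
 * Copy the modem metadata (mdt header) into a physically contiguous buffer
 * and hand its address to the MBA for authentication of the image headers.
 */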
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}

static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

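/*
 * Load the MPSS firmware: authenticate the mdt header, copy each valid
 * segment (modem.bNN) into the MPSS memory region while updating the RMB
 * code-length register, then wait for the MBA to finish authentication.
 */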
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}

		size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!size) {
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}

		size += phdr->p_memsz;
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}

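/*
 * Boot sequence: enable the proxy and active resources, release the MSS
 * reset, boot the MBA from its reserved region, load and authenticate the
 * MPSS image, then wait for the handover before dropping the proxy votes.
 */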
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	}

	qproc->running = true;

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}

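/*
 * Signal the modem to stop via the smem state, wait for the stop
 * acknowledgment, halt the AXI ports and power the subsystem back down.
 */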
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}

static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};

static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "fatal error received: %s\n", msg);
	else
		dev_err(qproc->dev, "fatal error without message\n");

	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);
	return IRQ_HANDLED;
}

static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);
	return IRQ_HANDLED;
}

static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}

static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

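/*
 * Resolve the "mba" and "mpss" memory-region phandles and map both reserved
 * regions for the driver's use.
 */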
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}

static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);
	rproc_free(qproc->rproc);

	return 0;
}

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
};

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");