/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/ramdump.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/secure_buffer.h>

#include <linux/uaccess.h>
#include <asm/setup.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_pil_event.h>

#include "peripheral-loader.h"

#define pil_err(desc, fmt, ...) \
	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
#define pil_info(desc, fmt, ...) \
	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)

#if defined(CONFIG_ARM)
#define pil_memset_io(d, c, count) memset(d, c, count)
#else
#define pil_memset_io(d, c, count) memset_io(d, c, count)
#endif

#define PIL_NUM_DESC 10
#define NUM_OF_ENCRYPTED_KEY 3
static void __iomem *pil_info_base;
static void __iomem *pil_minidump_base;

/**
 * proxy_timeout_ms - Override for proxy vote timeouts
 * -1: Use driver-specified timeout
 *  0: Hold proxy votes until shutdown
 * >0: Specify a custom timeout in ms
 */
static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, 0644);
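
/*
 * Because the parameter is registered with 0644 permissions, the override
 * can also be changed at runtime, e.g. (the sysfs path assumes the default
 * KBUILD_MODNAME for this file):
 *
 *	echo 5000 > /sys/module/peripheral_loader/parameters/proxy_timeout_ms
 *
 * which forces a 5 s proxy vote timeout for all PIL descriptors.
 */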

static bool disable_timeouts;

/**
 * struct pil_mdt - Representation of <name>.mdt file in memory
 * @hdr: ELF32 header
 * @phdr: ELF32 program headers
 */
struct pil_mdt {
	struct elf32_hdr hdr;
	struct elf32_phdr phdr[];
};
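
/*
 * On-disk layout, as consumed by pil_boot() and pil_load_seg() below:
 * <name>.mdt carries only the ELF header plus program headers, while the
 * contents of loadable segment N are fetched separately from <name>.bNN
 * via request_firmware_into_buf() directly into the segment's memory.
 */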

/**
 * struct boot_minidump_smem_region - Representation of SMEM TOC
 * @region_name: Name of the modem segment to be dumped
 * @region_base_address: Base address of the segment to be dumped
 * @region_size: Size of the segment to be dumped
 */
struct boot_minidump_smem_region {
	char region_name[16];
	u64 region_base_address;
	u64 region_size;
};

/**
 * struct pil_seg - memory map representing one segment
 * @paddr: physical start address of segment
 * @sz: size of segment
 * @filesz: size of segment on disk
 * @num: segment number
 * @list: entry in the descriptor's list of segments
 * @relocated: true if segment is relocated, false otherwise
 *
 * Loosely based on an elf program header. Contains all necessary information
 * to load and initialize a segment of the image in memory.
 */
struct pil_seg {
	phys_addr_t paddr;
	unsigned long sz;
	unsigned long filesz;
	int num;
	struct list_head list;
	bool relocated;
};

/**
 * struct pil_priv - Private state for a pil_desc
 * @proxy: work item used to run the proxy unvoting routine
 * @ws: wakeup source to prevent suspend during pil_boot
 * @wname: name of @ws
 * @desc: pointer to pil_desc this is private data for
 * @segs: list of segments sorted by physical address
 * @entry_addr: physical address at which the processor starts booting
 * @base_addr: smallest start address among all relocatable segments
 * @region_start: address where the relocatable region starts or lowest
 *		  address for non-relocatable images
 * @region_end: address where the relocatable region ends or highest address
 *		for non-relocatable images
 * @region: region allocated for relocatable images
 * @info: this descriptor's pil_image_info slot in the PIL IMEM region
 * @minidump: this subsystem's md_ssr_ss_info slot in the minidump IMEM region
 * @minidump_id: minidump ID read from the device tree
 * @id: descriptor ID allocated from pil_ida
 * @unvoted_flag: tracks whether the proxy vote has already been released
 * @region_size: size of the allocated relocatable region
 *
 * This struct contains data for a pil_desc that should not be exposed outside
 * of this file. This structure points to the descriptor and the descriptor
 * points to this structure so that PIL drivers can't access the private
 * data of a descriptor but this file can access both.
 */
struct pil_priv {
	struct delayed_work proxy;
	struct wakeup_source ws;
	char wname[32];
	struct pil_desc *desc;
	struct list_head segs;
	phys_addr_t entry_addr;
	phys_addr_t base_addr;
	phys_addr_t region_start;
	phys_addr_t region_end;
	void *region;
	struct pil_image_info __iomem *info;
	struct md_ssr_ss_info __iomem *minidump;
	int minidump_id;
	int id;
	int unvoted_flag;
	size_t region_size;
};

static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
{
	struct boot_minidump_smem_region __iomem *region_info;
	struct ramdump_segment *ramdump_segs, *s;
	struct pil_priv *priv = desc->priv;
	void __iomem *subsys_smem_base;
	void __iomem *offset;
	int ss_mdump_seg_cnt;
	int ret, i;

	memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
	offset = offset + sizeof(priv->minidump->md_ss_smem_regions_baseptr);
	/* There are 3 encryption keys which also need to be dumped */
	ss_mdump_seg_cnt = readb_relaxed(offset) +
				NUM_OF_ENCRYPTED_KEY;

	subsys_smem_base = ioremap(__raw_readl(priv->minidump),
				   ss_mdump_seg_cnt * sizeof(*region_info));
	region_info =
		(struct boot_minidump_smem_region __iomem *)subsys_smem_base;
	ramdump_segs = kcalloc(ss_mdump_seg_cnt,
			       sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_linux(desc, priv->region_start,
			(priv->region_end - priv->region_start));

	s = ramdump_segs;
	for (i = 0; i < ss_mdump_seg_cnt; i++) {
		memcpy(&offset, &region_info, sizeof(region_info));
		memcpy(&s->name, &region_info, sizeof(region_info));
		offset = offset + sizeof(region_info->region_name);
		s->address = __raw_readl(offset);
		offset = offset + sizeof(region_info->region_base_address);
		s->size = __raw_readl(offset);
		s++;
		region_info++;
	}
	ret = do_minidump(ramdump_dev, ramdump_segs, ss_mdump_seg_cnt);
	kfree(ramdump_segs);
	if (ret)
		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
			__func__, desc->name, ret);
	writel_relaxed(0, &priv->minidump->md_ss_smem_regions_baseptr);
	writeb_relaxed(1, &priv->minidump->md_ss_ssr_cause);

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
			(priv->region_end - priv->region_start));
	return ret;
}

/**
 * pil_do_ramdump() - Ramdump an image
 * @desc: descriptor from pil_desc_init()
 * @ramdump_dev: ramdump device returned from create_ramdump_device()
 *
 * Calls the ramdump API with a list of segments generated from the addresses
 * that the descriptor corresponds to.
 */
int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;
	int count = 0, ret;
	struct ramdump_segment *ramdump_segs, *s;

	if (priv->minidump && (__raw_readl(priv->minidump) > 0))
		return pil_do_minidump(desc, ramdump_dev);

	list_for_each_entry(seg, &priv->segs, list)
		count++;

	ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	s = ramdump_segs;
	list_for_each_entry(seg, &priv->segs, list) {
		s->address = seg->paddr;
		s->size = seg->sz;
		s++;
	}

	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
	kfree(ramdump_segs);

	if (ret)
		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
			__func__, desc->name, ret);

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	return ret;
}
EXPORT_SYMBOL(pil_do_ramdump);

int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
			     size_t size)
{
	int ret;
	int srcVM[1] = {VMID_HLOS};
	int destVM[1] = {desc->subsys_vmid};
	int destVMperm[1] = {PERM_READ | PERM_WRITE};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
	if (ret)
		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
			__func__, &addr, size, desc->subsys_vmid, ret);
	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys);

int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
			    size_t size)
{
	int ret;
	int srcVM[1] = {desc->subsys_vmid};
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
	if (ret)
		panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
			__func__, &addr, size, desc->subsys_vmid, ret);

	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_linux);

int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
				       phys_addr_t addr, size_t size)
{
	int ret;
	int srcVM[1] = {VMID_HLOS};
	int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
	if (ret)
		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
			__func__, &addr, size, desc->subsys_vmid, ret);

	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);

int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
		    int VMid)
{
	int ret;
	int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
	int destVM[1] = {VMid};
	int destVMperm[1] = {PERM_READ | PERM_WRITE};

	if (VMid == VMID_HLOS)
		destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;

	ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
	if (ret)
		panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
			__func__, &addr, size, desc->subsys_vmid);

	return ret;
}
EXPORT_SYMBOL(pil_reclaim_mem);
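
/*
 * Summary of the hyp_assign_phys() ownership handoffs above, as driven by
 * pil_boot() for images that specify a subsys_vmid:
 *
 *	HLOS only
 *	  -> HLOS + subsystem  (pil_assign_mem_to_subsys_and_linux, loading)
 *	  -> subsystem only    (pil_reclaim_mem, before auth_and_reset)
 *	  -> HLOS only         (pil_assign_mem_to_linux, ramdump/shutdown)
 */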

/**
 * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
 * @desc: descriptor from pil_desc_init()
 *
 * Returns the physical address where the image boots at or 0 if unknown.
 */
phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
	return desc->priv ? desc->priv->entry_addr : 0;
}
EXPORT_SYMBOL(pil_get_entry_addr);

static void __pil_proxy_unvote(struct pil_priv *priv)
{
	struct pil_desc *desc = priv->desc;

	desc->ops->proxy_unvote(desc);
	notify_proxy_unvote(desc->dev);
	__pm_relax(&priv->ws);
	module_put(desc->owner);
}

static void pil_proxy_unvote_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);

	__pil_proxy_unvote(priv);
}

static int pil_proxy_vote(struct pil_desc *desc)
{
	int ret = 0;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->proxy_vote) {
		__pm_stay_awake(&priv->ws);
		ret = desc->ops->proxy_vote(desc);
		if (ret)
			__pm_relax(&priv->ws);
	}

	if (desc->proxy_unvote_irq)
		enable_irq(desc->proxy_unvote_irq);
	notify_proxy_vote(desc->dev);

	return ret;
}

static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
{
	struct pil_priv *priv = desc->priv;
	unsigned long timeout;

	if (proxy_timeout_ms == 0 && !immediate)
		return;
	else if (proxy_timeout_ms > 0)
		timeout = proxy_timeout_ms;
	else
		timeout = desc->proxy_timeout;

	if (desc->ops->proxy_unvote) {
		if (WARN_ON(!try_module_get(desc->owner)))
			return;

		if (immediate)
			timeout = 0;

		if (!desc->proxy_unvote_irq || immediate)
			schedule_delayed_work(&priv->proxy,
					      msecs_to_jiffies(timeout));
	}
}

static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
{
	struct pil_desc *desc = dev_id;
	struct pil_priv *priv = desc->priv;

	pil_info(desc, "Power/Clock ready interrupt received\n");
	if (!desc->priv->unvoted_flag) {
		desc->priv->unvoted_flag = 1;
		__pil_proxy_unvote(priv);
	}

	return IRQ_HANDLED;
}

static bool segment_is_relocatable(const struct elf32_phdr *p)
{
	return !!(p->p_flags & BIT(27));
}
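
/*
 * Qualcomm MDT images reuse the upper ELF p_flags bits for loader metadata:
 * bit 27 marks a segment as relocatable, and bits 26:24 carry a segment
 * type, where type 0x2 denotes a hash segment that segment_is_loadable()
 * below deliberately skips.
 */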

static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
{
	return addr - priv->base_addr + priv->region_start;
}
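
/*
 * Worked example: for an image linked at base_addr 0x80000000 whose
 * relocatable region was allocated at region_start 0x8ea00000, a segment
 * address of 0x80001000 relocates to 0x8ea01000 (addresses here are
 * illustrative, not taken from any real target).
 */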

static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
				    const struct elf32_phdr *phdr, int num)
{
	bool reloc = segment_is_relocatable(phdr);
	const struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;

	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		pil_err(desc, "Segment not relocatable, kernel memory would be overwritten [%#08lx, %#08lx)\n",
			(unsigned long)phdr->p_paddr,
			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return ERR_PTR(-EPERM);
	}

	if (phdr->p_filesz > phdr->p_memsz) {
		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
			num, phdr->p_filesz, phdr->p_memsz);
		return ERR_PTR(-EINVAL);
	}

	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return ERR_PTR(-ENOMEM);
	seg->num = num;
	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
	seg->filesz = phdr->p_filesz;
	seg->sz = phdr->p_memsz;
	seg->relocated = reloc;
	INIT_LIST_HEAD(&seg->list);

	return seg;
}

#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))

static int segment_is_loadable(const struct elf32_phdr *p)
{
	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
		p->p_memsz;
}

static void pil_dump_segs(const struct pil_priv *priv)
{
	struct pil_seg *seg;
	phys_addr_t seg_h_paddr;

	list_for_each_entry(seg, &priv->segs, list) {
		seg_h_paddr = seg->paddr + seg->sz;
		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
			 &seg->paddr, &seg_h_paddr);
	}
}

/*
 * Ensure the entry address lies within the image limits and, if the image is
 * relocatable, that it lies within a relocatable segment.
 */
static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	struct pil_seg *seg;
	phys_addr_t entry = mdt->hdr.e_entry;
	bool image_relocated = priv->region;

	if (image_relocated)
		entry = pil_reloc(priv, entry);
	priv->entry_addr = entry;

	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
		return 0;

	list_for_each_entry(seg, &priv->segs, list) {
		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
			if (!image_relocated)
				return 0;
			else if (seg->relocated)
				return 0;
		}
	}
	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
	pil_dump_segs(priv);
	return -EADDRNOTAVAIL;
}

static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
			    phys_addr_t max_addr, size_t align)
{
	void *region;
	size_t size = max_addr - min_addr;
	size_t aligned_size;

	/* Don't reallocate due to fragmentation concerns, just sanity check */
	if (priv->region) {
		if (WARN(priv->region_end - priv->region_start < size,
			 "Can't reuse PIL memory, too small\n"))
			return -ENOMEM;
		return 0;
	}

	if (align > SZ_4M)
		aligned_size = ALIGN(size, SZ_4M);
	else
		aligned_size = ALIGN(size, SZ_1M);

	priv->desc->attrs = 0;
	priv->desc->attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;

	region = dma_alloc_attrs(priv->desc->dev, aligned_size,
				 &priv->region_start, GFP_KERNEL,
				 priv->desc->attrs);

	if (region == NULL) {
		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
			size);
		priv->region_start = 0;
		priv->region_end = 0;
		return -ENOMEM;
	}

	priv->region = region;
	priv->region_end = priv->region_start + size;
	priv->base_addr = min_addr;
	priv->region_size = aligned_size;

	return 0;
}

static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	const struct elf32_phdr *phdr;
	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
	size_t align = 0;
	int i, ret = 0;
	bool relocatable = false;

	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
	max_addr_n = max_addr_r = 0;

	/* Find the image limits */
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		start = phdr->p_paddr;
		end = start + phdr->p_memsz;

		if (segment_is_relocatable(phdr)) {
			min_addr_r = min(min_addr_r, start);
			max_addr_r = max(max_addr_r, end);
			/*
			 * Lowest relocatable segment dictates alignment of
			 * relocatable region
			 */
			if (min_addr_r == start)
				align = phdr->p_align;
			relocatable = true;
		} else {
			min_addr_n = min(min_addr_n, start);
			max_addr_n = max(max_addr_n, end);
		}
	}

	/*
	 * Align the max address to the next 4K boundary to satisfy iommus and
	 * XPUs that operate on 4K chunks.
	 */
	max_addr_n = ALIGN(max_addr_n, SZ_4K);
	max_addr_r = ALIGN(max_addr_r, SZ_4K);

	if (relocatable) {
		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
	} else {
		priv->region_start = min_addr_n;
		priv->region_end = max_addr_n;
		priv->base_addr = min_addr_n;
	}

	if (priv->info) {
		__iowrite32_copy(&priv->info->start, &priv->region_start,
				 sizeof(priv->region_start) / 4);
		writel_relaxed(priv->region_end - priv->region_start,
			       &priv->info->size);
	}

	return ret;
}

static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
{
	int ret = 0;
	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);

	if (seg_a->paddr < seg_b->paddr)
		ret = -1;
	else if (seg_a->paddr > seg_b->paddr)
		ret = 1;

	return ret;
}

static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
{
	struct pil_priv *priv = desc->priv;
	const struct elf32_phdr *phdr;
	struct pil_seg *seg;
	int i, ret;

	ret = pil_setup_region(priv, mdt);
	if (ret)
		return ret;

	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
		 &priv->region_end);

	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		seg = pil_init_seg(desc, phdr, i);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		list_add_tail(&seg->list, &priv->segs);
	}
	list_sort(NULL, &priv->segs, pil_cmp_seg);

	return pil_init_entry_addr(priv, mdt);
}
661
Puja Gupta7c187e82017-02-06 14:33:19 -0800662struct pil_map_fw_info {
663 void *region;
664 unsigned long attrs;
665 phys_addr_t base_addr;
666 struct device *dev;
667};
668
Kyle Yane45fa022016-08-29 11:40:26 -0700669static void pil_release_mmap(struct pil_desc *desc)
670{
671 struct pil_priv *priv = desc->priv;
672 struct pil_seg *p, *tmp;
673 u64 zero = 0ULL;
Gaurav Kohliebb42832017-02-21 12:08:50 +0530674
675 if (priv->info) {
676 __iowrite32_copy(&priv->info->start, &zero,
677 sizeof(zero) / 4);
678 writel_relaxed(0, &priv->info->size);
679 }
680
681 list_for_each_entry_safe(p, tmp, &priv->segs, list) {
682 list_del(&p->list);
683 kfree(p);
684 }
685}
686
687static void pil_clear_segment(struct pil_desc *desc)
688{
689 struct pil_priv *priv = desc->priv;
Puja Gupta7c187e82017-02-06 14:33:19 -0800690 u8 __iomem *buf;
691
692 struct pil_map_fw_info map_fw_info = {
693 .attrs = desc->attrs,
694 .region = priv->region,
695 .base_addr = priv->region_start,
696 .dev = desc->dev,
697 };
698
699 void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
700
701 /* Clear memory so that unauthorized ELF code is not left behind */
702 buf = desc->map_fw_mem(priv->region_start, (priv->region_end -
703 priv->region_start), map_data);
704 pil_memset_io(buf, 0, (priv->region_end - priv->region_start));
705 desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
706 map_data);
Kyle Yane45fa022016-08-29 11:40:26 -0700707
Kyle Yane45fa022016-08-29 11:40:26 -0700708}

#define IOMAP_SIZE SZ_1M

static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
{
	struct pil_map_fw_info *info = data;

	return dma_remap(info->dev, info->region, paddr, size,
			 info->attrs);
}

static void unmap_fw_mem(void *vaddr, size_t size, void *data)
{
	struct pil_map_fw_info *info = data;

	dma_unremap(info->dev, vaddr, size);
}

static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
{
	int ret = 0, count;
	phys_addr_t paddr;
	char fw_name[30];
	int num = seg->num;
	const struct firmware *fw = NULL;
	void __iomem *firmware_buf;
	struct pil_map_fw_info map_fw_info = {
		.attrs = desc->attrs,
		.region = desc->priv->region,
		.base_addr = desc->priv->region_start,
		.dev = desc->dev,
	};
	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;

	if (seg->filesz) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
			 desc->fw_name, num);
		firmware_buf = desc->map_fw_mem(seg->paddr, seg->filesz,
						map_data);
		if (!firmware_buf) {
			pil_err(desc, "Failed to map memory for firmware buffer\n");
			return -ENOMEM;
		}

		ret = request_firmware_into_buf(&fw, fw_name, desc->dev,
						firmware_buf, seg->filesz);
		desc->unmap_fw_mem(firmware_buf, seg->filesz, map_data);

		if (ret) {
			pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
				fw_name, ret);
			return ret;
		}

		if (fw->size != seg->filesz) {
			pil_err(desc, "Blob size %zu doesn't match %lu\n",
				fw->size, seg->filesz);
			release_firmware(fw);
			return -EPERM;
		}
		release_firmware(fw);
	}

	/* Zero out trailing memory */
	paddr = seg->paddr + seg->filesz;
	count = seg->sz - seg->filesz;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = desc->map_fw_mem(paddr, size, map_data);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			return -ENOMEM;
		}
		pil_memset_io(buf, 0, size);

		desc->unmap_fw_mem(buf, size, map_data);

		count -= size;
		paddr += size;
	}

	if (desc->ops->verify_blob) {
		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
		if (ret)
			pil_err(desc, "Blob%u failed verification(rc:%d)\n",
				num, ret);
	}

	return ret;
}

static int pil_parse_devicetree(struct pil_desc *desc)
{
	struct device_node *ofnode = desc->dev->of_node;
	int clk_ready = 0;

	if (!ofnode)
		return -EINVAL;

	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
				 &desc->subsys_vmid))
		pr_debug("Unable to read the mem-protect-id for %s\n",
			 desc->name);

	if (desc->ops->proxy_unvote && of_find_property(ofnode,
					"qcom,gpio-proxy-unvote",
					NULL)) {
		clk_ready = of_get_named_gpio(ofnode,
				"qcom,gpio-proxy-unvote", 0);

		if (clk_ready < 0) {
			dev_dbg(desc->dev,
				"[%s]: Error getting proxy unvoting gpio\n",
				desc->name);
			return clk_ready;
		}

		clk_ready = gpio_to_irq(clk_ready);
		if (clk_ready < 0) {
			dev_err(desc->dev,
				"[%s]: Error getting proxy unvote IRQ\n",
				desc->name);
			return clk_ready;
		}
	}
	desc->proxy_unvote_irq = clk_ready;
	return 0;
}

/* Synchronize request_firmware() with suspend */
static DECLARE_RWSEM(pil_pm_rwsem);

/**
 * pil_boot() - Load a peripheral image into memory and boot it
 * @desc: descriptor from pil_desc_init()
 *
 * Returns 0 on success or -ERROR on failure.
 */
int pil_boot(struct pil_desc *desc)
{
	int ret;
	char fw_name[30];
	const struct pil_mdt *mdt;
	const struct elf32_hdr *ehdr;
	struct pil_seg *seg;
	const struct firmware *fw;
	struct pil_priv *priv = desc->priv;
	bool mem_protect = false;
	bool hyp_assign = false;

	if (desc->shutdown_fail)
		pil_err(desc, "Subsystem shutdown failed previously!\n");

	/* Reinitialize for new image */
	pil_release_mmap(desc);

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
	ret = request_firmware(&fw, fw_name, desc->dev);
	if (ret) {
		pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		pil_err(desc, "Not big enough to be an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	mdt = (const struct pil_mdt *)fw->data;
	ehdr = &mdt->hdr;

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pil_err(desc, "Not an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		pil_err(desc, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		pil_err(desc, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil_init_mmap(desc, mdt);
	if (ret)
		goto release_fw;

	desc->priv->unvoted_flag = 0;
	ret = pil_proxy_vote(desc);
	if (ret) {
		pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
		goto release_fw;
	}

	trace_pil_event("before_init_image", desc);
	if (desc->ops->init_image)
		ret = desc->ops->init_image(desc, fw->data, fw->size);
	if (ret) {
		pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
		goto err_boot;
	}

	trace_pil_event("before_mem_setup", desc);
	if (desc->ops->mem_setup)
		ret = desc->ops->mem_setup(desc, priv->region_start,
				priv->region_end - priv->region_start);
	if (ret) {
		pil_err(desc, "Memory setup error(rc:%d)\n", ret);
		goto err_deinit_image;
	}

	if (desc->subsys_vmid > 0) {
		/*
		 * In case of modem ssr, we need to assign memory back to
		 * linux. This is not true after cold boot since linux already
		 * owns it. Also for secure boot devices, modem memory has to
		 * be released after MBA is booted.
		 */
		trace_pil_event("before_assign_mem", desc);
		if (desc->modem_ssr) {
			ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
			if (ret)
				pil_err(desc, "Failed to assign to linux, ret - %d\n",
					ret);
		}
		ret = pil_assign_mem_to_subsys_and_linux(desc,
				priv->region_start,
				(priv->region_end - priv->region_start));
		if (ret) {
			pil_err(desc, "Failed to assign memory, ret - %d\n",
				ret);
			goto err_deinit_image;
		}
		hyp_assign = true;
	}

	trace_pil_event("before_load_seg", desc);
	list_for_each_entry(seg, &desc->priv->segs, list) {
		ret = pil_load_seg(desc, seg);
		if (ret)
			goto err_deinit_image;
	}

	if (desc->subsys_vmid > 0) {
		trace_pil_event("before_reclaim_mem", desc);
		ret = pil_reclaim_mem(desc, priv->region_start,
				(priv->region_end - priv->region_start),
				desc->subsys_vmid);
		if (ret) {
			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
				desc->name, ret);
			goto err_deinit_image;
		}
		hyp_assign = false;
	}

	trace_pil_event("before_auth_reset", desc);
	ret = desc->ops->auth_and_reset(desc);
	if (ret) {
		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
		goto err_auth_and_reset;
	}
	trace_pil_event("reset_done", desc);
	pil_info(desc, "Brought out of reset\n");
	desc->modem_ssr = false;
err_auth_and_reset:
	if (ret && desc->subsys_vmid > 0) {
		pil_assign_mem_to_linux(desc, priv->region_start,
			(priv->region_end - priv->region_start));
		mem_protect = true;
	}
err_deinit_image:
	if (ret && desc->ops->deinit_image)
		desc->ops->deinit_image(desc);
err_boot:
	if (ret && desc->proxy_unvote_irq)
		disable_irq(desc->proxy_unvote_irq);
	pil_proxy_unvote(desc, ret);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	if (ret) {
		if (priv->region) {
			if (desc->subsys_vmid > 0 && !mem_protect &&
			    hyp_assign) {
				pil_reclaim_mem(desc, priv->region_start,
					(priv->region_end -
					 priv->region_start),
					VMID_HLOS);
			}
			if (desc->clear_fw_region && priv->region_start)
				pil_clear_segment(desc);
			dma_free_attrs(desc->dev, priv->region_size,
				       priv->region, priv->region_start,
				       desc->attrs);
			priv->region = NULL;
		}
		pil_release_mmap(desc);
	}
	return ret;
}
EXPORT_SYMBOL(pil_boot);

/**
 * pil_shutdown() - Shutdown a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_shutdown(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (desc->ops->shutdown) {
		if (desc->ops->shutdown(desc))
			desc->shutdown_fail = true;
		else
			desc->shutdown_fail = false;
	}

	if (desc->proxy_unvote_irq) {
		disable_irq(desc->proxy_unvote_irq);
		if (!desc->priv->unvoted_flag)
			pil_proxy_unvote(desc, 1);
	} else if (!proxy_timeout_ms)
		pil_proxy_unvote(desc, 1);
	else
		flush_delayed_work(&priv->proxy);
	desc->modem_ssr = true;
}
EXPORT_SYMBOL(pil_shutdown);

/**
 * pil_free_memory() - Free memory resources associated with a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_free_memory(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv->region) {
		if (desc->subsys_vmid > 0)
			pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
		dma_free_attrs(desc->dev, priv->region_size,
			       priv->region, priv->region_start, desc->attrs);
		priv->region = NULL;
	}
}
EXPORT_SYMBOL(pil_free_memory);

static DEFINE_IDA(pil_ida);

bool is_timeout_disabled(void)
{
	return disable_timeouts;
}

/**
 * pil_desc_init() - Initialize a pil descriptor
 * @desc: descriptor to initialize
 *
 * Initialize a pil descriptor for use by other pil functions. This function
 * must be called before calling pil_boot() or pil_shutdown().
 *
 * Returns 0 for success and -ERROR on failure.
 */
int pil_desc_init(struct pil_desc *desc)
{
	struct pil_priv *priv;
	void __iomem *addr;
	int ret, ss_imem_offset_mdump;
	char buf[sizeof(priv->info->name)];
	struct device_node *ofnode = desc->dev->of_node;

	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
		 "Invalid proxy voting. Ignoring\n"))
		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	desc->priv = priv;
	priv->desc = desc;

	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
	if (priv->id < 0)
		goto err;

	if (pil_info_base) {
		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
		priv->info = (struct pil_image_info __iomem *)addr;

		strlcpy(buf, desc->name, sizeof(buf));
		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
	}
	if (of_property_read_u32(ofnode, "qcom,minidump-id",
				 &priv->minidump_id))
		pr_debug("minidump-id not found for %s\n", desc->name);
	else {
		ss_imem_offset_mdump =
			sizeof(struct md_ssr_ss_info) * priv->minidump_id;
		if (pil_minidump_base) {
			/* Add 0x4 to get start of struct md_ssr_ss_info base
			 * from struct md_ssr_toc for any subsystem,
			 * struct md_ssr_ss_info is actually the pointer
			 * of ToC in smem for any subsystem.
			 */
			addr = pil_minidump_base + ss_imem_offset_mdump + 0x4;
			priv->minidump = (struct md_ssr_ss_info __iomem *)addr;
		}
	}

	ret = pil_parse_devicetree(desc);
	if (ret)
		goto err_parse_dt;

	/* Ignore users who don't make any sense */
	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
		 && !desc->proxy_timeout,
		 "Invalid proxy unvote callback or a proxy timeout of 0 was specified or no proxy unvote IRQ was specified.\n");

	if (desc->proxy_unvote_irq) {
		ret = request_threaded_irq(desc->proxy_unvote_irq,
				  NULL,
				  proxy_unvote_intr_handler,
				  IRQF_ONESHOT | IRQF_TRIGGER_RISING,
				  desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev,
				"Unable to request proxy unvote IRQ: %d\n",
				ret);
			goto err_parse_dt;
		}
		disable_irq(desc->proxy_unvote_irq);
	}

	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
	wakeup_source_init(&priv->ws, priv->wname);
	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
	INIT_LIST_HEAD(&priv->segs);

	/* Make sure mapping functions are set. */
	if (!desc->map_fw_mem)
		desc->map_fw_mem = map_fw_mem;

	if (!desc->unmap_fw_mem)
		desc->unmap_fw_mem = unmap_fw_mem;

	return 0;
err_parse_dt:
	ida_simple_remove(&pil_ida, priv->id);
err:
	kfree(priv);
	return ret;
}
EXPORT_SYMBOL(pil_desc_init);

/**
 * pil_desc_release() - Release a pil descriptor
 * @desc: descriptor to free
 */
void pil_desc_release(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv) {
		ida_simple_remove(&pil_ida, priv->id);
		flush_delayed_work(&priv->proxy);
		wakeup_source_trash(&priv->ws);
	}
	desc->priv = NULL;
	kfree(priv);
}
EXPORT_SYMBOL(pil_desc_release);
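
/*
 * Typical lifecycle as seen from a PIL client driver (a hedged sketch:
 * "foo" and the ops shown are illustrative, not an existing client):
 *
 *	static struct pil_reset_ops foo_ops = {
 *		.init_image = foo_init_image,
 *		.auth_and_reset = foo_auth_and_reset,
 *		.shutdown = foo_shutdown,
 *	};
 *
 *	desc->name = "foo";
 *	desc->fw_name = "foo";		// loads foo.mdt plus foo.bNN
 *	desc->dev = &pdev->dev;
 *	desc->ops = &foo_ops;
 *	desc->owner = THIS_MODULE;
 *	ret = pil_desc_init(desc);	// allocate private state, parse DT
 *	ret = pil_boot(desc);		// load segments, auth_and_reset()
 *	...
 *	pil_shutdown(desc);		// stop the peripheral
 *	pil_free_memory(desc);		// return region to HLOS and free it
 *	pil_desc_release(desc);		// tear down private state
 */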

static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&pil_pm_rwsem);
		break;
	case PM_POST_SUSPEND:
		up_write(&pil_pm_rwsem);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block pil_pm_notifier = {
	.notifier_call = pil_pm_notify,
};

static int __init msm_pil_init(void)
{
	struct device_node *np;
	struct resource res;
	int i;

	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
	if (!np) {
		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
		goto out;
	}
	if (of_address_to_resource(np, 0, &res)) {
		pr_warn("pil: address to resource on imem region failed\n");
		goto out;
	}
	pil_info_base = ioremap(res.start, resource_size(&res));
	if (!pil_info_base) {
		pr_warn("pil: could not map imem region\n");
		goto out;
	}
	if (__raw_readl(pil_info_base) == 0x53444247) {
		pr_info("pil: pil-imem set to disable pil timeouts\n");
		disable_timeouts = true;
	}
	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));

	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-minidump");
	if (!np) {
		pr_warn("pil: failed to find qcom,msm-imem-minidump node\n");
		goto out;
	} else {
		pil_minidump_base = of_iomap(np, 0);
		if (!pil_minidump_base) {
			pr_err("unable to map pil minidump imem offset\n");
			goto out;
		}
	}
	for (i = 0; i < sizeof(struct md_ssr_toc)/sizeof(u32); i++)
		writel_relaxed(0, pil_minidump_base + (i * sizeof(u32)));
	writel_relaxed(1, pil_minidump_base);
out:
	return register_pm_notifier(&pil_pm_notifier);
}
device_initcall(msm_pil_init);

static void __exit msm_pil_exit(void)
{
	unregister_pm_notifier(&pil_pm_notifier);
	if (pil_info_base)
		iounmap(pil_info_base);
	if (pil_minidump_base)
		iounmap(pil_minidump_base);
}
module_exit(msm_pil_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");