blob: 91c94415c78c4cf11b83c9dcc553f79fa77f0082 [file] [log] [blame]
Kyle Yan88156d12017-01-05 15:12:45 -08001/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
Kyle Yane45fa022016-08-29 11:40:26 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/string.h>
15#include <linux/firmware.h>
16#include <linux/io.h>
17#include <linux/elf.h>
18#include <linux/mutex.h>
19#include <linux/memblock.h>
20#include <linux/slab.h>
21#include <linux/suspend.h>
22#include <linux/rwsem.h>
23#include <linux/sysfs.h>
24#include <linux/workqueue.h>
25#include <linux/jiffies.h>
26#include <linux/err.h>
27#include <linux/list.h>
28#include <linux/list_sort.h>
29#include <linux/idr.h>
30#include <linux/interrupt.h>
31#include <linux/of_gpio.h>
32#include <linux/of_address.h>
33#include <linux/io.h>
34#include <linux/dma-mapping.h>
35#include <soc/qcom/ramdump.h>
36#include <soc/qcom/subsystem_restart.h>
37#include <soc/qcom/secure_buffer.h>
38
39#include <linux/uaccess.h>
40#include <asm/setup.h>
Gaurav Kohli2da45012017-05-08 15:21:43 +053041#define CREATE_TRACE_POINTS
42#include <trace/events/trace_msm_pil_event.h>
Kyle Yane45fa022016-08-29 11:40:26 -070043
44#include "peripheral-loader.h"
45
/* Logging helpers that prefix every message with the image name. */
#define pil_err(desc, fmt, ...)						\
	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
#define pil_info(desc, fmt, ...)					\
	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)

/* On ARM32 the firmware region is mapped cacheable, so plain memset works */
#if defined(CONFIG_ARM)
#define pil_memset_io(d, c, count) memset(d, c, count)
#else
#define pil_memset_io(d, c, count) memset_io(d, c, count)
#endif

/* Maximum number of PIL descriptors (bounds the imem info table and IDA) */
#define PIL_NUM_DESC		10
/* Base of the imem region holding per-image load info, set in msm_pil_init() */
static void __iomem *pil_info_base;

/**
 * proxy_timeout - Override for proxy vote timeouts
 * -1: Use driver-specified timeout
 * 0: Hold proxy votes until shutdown
 * >0: Specify a custom timeout in ms
 */
static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, 0644);

/* Set when imem contains the debug cookie; see msm_pil_init() */
static bool disable_timeouts;
/**
 * struct pil_mdt - Representation of <name>.mdt file in memory
 * @hdr: ELF32 header
 * @phdr: ELF32 program headers (flexible array, @hdr.e_phnum entries)
 */
struct pil_mdt {
	struct elf32_hdr hdr;
	struct elf32_phdr phdr[];
};
79
/**
 * struct pil_seg - memory map representing one segment
 * @paddr: physical start address of segment
 * @sz: size of segment in memory
 * @filesz: size of segment on disk
 * @num: segment number
 * @list: links this segment into the descriptor's sorted segment list
 * @relocated: true if segment is relocated, false otherwise
 *
 * Loosely based on an elf program header. Contains all necessary information
 * to load and initialize a segment of the image in memory.
 */
struct pil_seg {
	phys_addr_t paddr;
	unsigned long sz;
	unsigned long filesz;
	int num;
	struct list_head list;
	bool relocated;
};
100
/**
 * struct pil_priv - Private state for a pil_desc
 * @proxy: work item used to run the proxy unvoting routine
 * @ws: wakeup source to prevent suspend during pil_boot
 * @wname: name of @ws
 * @desc: pointer to pil_desc this is private data for
 * @segs: list of segments sorted by physical address
 * @entry_addr: physical address where processor starts booting at
 * @base_addr: smallest start address among all segments that are relocatable
 * @region_start: address where relocatable region starts or lowest address
 * for non-relocatable images
 * @region_end: address where relocatable region ends or highest address for
 * non-relocatable images
 * @region: region allocated for relocatable images
 * @info: per-image slot in the imem load-info table (may be NULL)
 * @id: index allocated from pil_ida, selects the @info slot
 * @unvoted_flag: flag to keep track if we have unvoted or not.
 * @region_size: size actually passed to dma_alloc_attrs (aligned up)
 *
 * This struct contains data for a pil_desc that should not be exposed outside
 * of this file. This structure points to the descriptor and the descriptor
 * points to this structure so that PIL drivers can't access the private
 * data of a descriptor but this file can access both.
 */
struct pil_priv {
	struct delayed_work proxy;
	struct wakeup_source ws;
	char wname[32];
	struct pil_desc *desc;
	struct list_head segs;
	phys_addr_t entry_addr;
	phys_addr_t base_addr;
	phys_addr_t region_start;
	phys_addr_t region_end;
	void *region;
	struct pil_image_info __iomem *info;
	int id;
	int unvoted_flag;
	size_t region_size;
};
138
/**
 * pil_do_ramdump() - Ramdump an image
 * @desc: descriptor from pil_desc_init()
 * @ramdump_dev: ramdump device returned from create_ramdump_device()
 *
 * Calls the ramdump API with a list of segments generated from the addresses
 * that the descriptor corresponds to. For secured images the memory is
 * temporarily assigned back to HLOS for the dump and re-assigned to the
 * subsystem afterwards.
 */
int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;
	int count = 0, ret;
	struct ramdump_segment *ramdump_segs, *s;

	list_for_each_entry(seg, &priv->segs, list)
		count++;

	ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	/*
	 * Return value intentionally unchecked here:
	 * pil_assign_mem_to_linux() panics internally on failure.
	 */
	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	s = ramdump_segs;
	list_for_each_entry(seg, &priv->segs, list) {
		s->address = seg->paddr;
		s->size = seg->sz;
		s++;
	}

	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
	kfree(ramdump_segs);

	if (ret)
		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
				__func__, desc->name, ret);

	/* Hand the region back to the subsystem regardless of dump outcome */
	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	return ret;
}
EXPORT_SYMBOL(pil_do_ramdump);
186
187int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
188 size_t size)
189{
190 int ret;
191 int srcVM[1] = {VMID_HLOS};
192 int destVM[1] = {desc->subsys_vmid};
193 int destVMperm[1] = {PERM_READ | PERM_WRITE};
194
195 ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
196 if (ret)
197 pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
198 __func__, &addr, size, desc->subsys_vmid, ret);
199 return ret;
200}
201EXPORT_SYMBOL(pil_assign_mem_to_subsys);
202
/*
 * Return ownership of [addr, addr + size) from the subsystem VM back to
 * HLOS with read/write/exec permission. Panics on failure because HLOS
 * losing access to its own memory is unrecoverable.
 */
int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
						size_t size)
{
	int ret;
	int srcVM[1] = {desc->subsys_vmid};
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
	if (ret)
		panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
			__func__, &addr, size, desc->subsys_vmid, ret);

	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_linux);
219
/*
 * Share [addr, addr + size): move from HLOS-only ownership to joint
 * HLOS + subsystem ownership (both read/write). Used while segments are
 * being loaded so both sides can touch the region.
 */
int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
						phys_addr_t addr, size_t size)
{
	int ret;
	int srcVM[1] = {VMID_HLOS};
	int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
	if (ret)
		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
				__func__, &addr, size, desc->subsys_vmid, ret);

	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);
236
/*
 * Collapse the joint HLOS + subsystem ownership of [addr, addr + size)
 * down to the single owner @VMid. HLOS additionally gets exec permission
 * when reclaiming for itself. Panics on failure since ownership would be
 * left indeterminate.
 */
int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
			int VMid)
{
	int ret;
	int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
	int destVM[1] = {VMid};
	int destVMperm[1] = {PERM_READ | PERM_WRITE};

	if (VMid == VMID_HLOS)
		destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;

	ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
	if (ret)
		panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
			__func__, &addr, size, desc->subsys_vmid);

	return ret;
}
EXPORT_SYMBOL(pil_reclaim_mem);
256
257/**
258 * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
259 * @desc: descriptor from pil_desc_init()
260 *
261 * Returns the physical address where the image boots at or 0 if unknown.
262 */
263phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
264{
265 return desc->priv ? desc->priv->entry_addr : 0;
266}
267EXPORT_SYMBOL(pil_get_entry_addr);
268
/* Drop proxy votes now: unvote, notify listeners, release wakelock/module. */
static void __pil_proxy_unvote(struct pil_priv *priv)
{
	struct pil_desc *desc = priv->desc;

	desc->ops->proxy_unvote(desc);
	notify_proxy_unvote(desc->dev);
	__pm_relax(&priv->ws);
	module_put(desc->owner);

}
279
/* Delayed-work entry point performing the deferred proxy unvote. */
static void pil_proxy_unvote_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);

	__pil_proxy_unvote(priv);
}
287
/*
 * Take the driver's proxy votes (clocks/regulators) ahead of boot.
 * Holds a wakeup source while the vote is in effect and arms the optional
 * "power/clock ready" unvote IRQ.
 */
static int pil_proxy_vote(struct pil_desc *desc)
{
	int ret = 0;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->proxy_vote) {
		__pm_stay_awake(&priv->ws);
		ret = desc->ops->proxy_vote(desc);
		if (ret)
			__pm_relax(&priv->ws);
	}

	if (desc->proxy_unvote_irq)
		enable_irq(desc->proxy_unvote_irq);
	notify_proxy_vote(desc->dev);

	return ret;
}
306
/*
 * Schedule the proxy unvote. @immediate forces a zero delay (used on boot
 * failure and shutdown). The proxy_timeout_ms module parameter overrides
 * the driver timeout: 0 holds votes until shutdown, >0 is a custom delay.
 */
static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
{
	struct pil_priv *priv = desc->priv;
	unsigned long timeout;

	if (proxy_timeout_ms == 0 && !immediate)
		return;
	else if (proxy_timeout_ms > 0)
		timeout = proxy_timeout_ms;
	else
		timeout = desc->proxy_timeout;

	if (desc->ops->proxy_unvote) {
		/* Pin the owning module until the deferred unvote runs */
		if (WARN_ON(!try_module_get(desc->owner)))
			return;

		if (immediate)
			timeout = 0;

		/* With an unvote IRQ present, the IRQ handler unvotes instead */
		if (!desc->proxy_unvote_irq || immediate)
			schedule_delayed_work(&priv->proxy,
					      msecs_to_jiffies(timeout));
	}
}
331
/*
 * Threaded IRQ handler: the subsystem signalled its own power/clocks are
 * ready, so the proxy votes can be dropped. unvoted_flag guards against
 * double unvoting.
 */
static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
{
	struct pil_desc *desc = dev_id;
	struct pil_priv *priv = desc->priv;

	pil_info(desc, "Power/Clock ready interrupt received\n");
	if (!desc->priv->unvoted_flag) {
		desc->priv->unvoted_flag = 1;
		__pil_proxy_unvote(priv);
	}

	return IRQ_HANDLED;
}
345
/* p_flags bit 27 marks a relocatable segment (Qualcomm ELF extension). */
static bool segment_is_relocatable(const struct elf32_phdr *p)
{
	return !!(p->p_flags & BIT(27));
}
350
/* Translate a link-time physical address into the allocated load region. */
static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
{
	return addr - priv->base_addr + priv->region_start;
}
355
/*
 * Build a pil_seg from an ELF program header, relocating its physical
 * address into the load region when the segment is relocatable. Rejects
 * non-relocatable segments overlapping kernel memory and headers whose
 * file size exceeds their memory size. Returns ERR_PTR on failure.
 */
static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
				  const struct elf32_phdr *phdr, int num)
{
	bool reloc = segment_is_relocatable(phdr);
	const struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;

	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		pil_err(desc, "Segment not relocatable,kernel memory would be overwritten[%#08lx, %#08lx)\n",
			(unsigned long)phdr->p_paddr,
			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return ERR_PTR(-EPERM);
	}

	if (phdr->p_filesz > phdr->p_memsz) {
		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
			num, phdr->p_filesz, phdr->p_memsz);
		return ERR_PTR(-EINVAL);
	}

	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return ERR_PTR(-ENOMEM);
	seg->num = num;
	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
	seg->filesz = phdr->p_filesz;
	seg->sz = phdr->p_memsz;
	seg->relocated = reloc;
	INIT_LIST_HEAD(&seg->list);

	return seg;
}
388
389#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
390
391static int segment_is_loadable(const struct elf32_phdr *p)
392{
393 return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
394 p->p_memsz;
395}
396
/* Log every segment's [start, end) physical range for diagnostics. */
static void pil_dump_segs(const struct pil_priv *priv)
{
	struct pil_seg *seg;
	phys_addr_t seg_h_paddr;

	list_for_each_entry(seg, &priv->segs, list) {
		seg_h_paddr = seg->paddr + seg->sz;
		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
				&seg->paddr, &seg_h_paddr);
	}
}
408
/*
 * Ensure the entry address lies within the image limits and if the image is
 * relocatable ensure it lies within a relocatable segment.
 */
static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	struct pil_seg *seg;
	phys_addr_t entry = mdt->hdr.e_entry;
	bool image_relocated = priv->region;

	if (image_relocated)
		entry = pil_reloc(priv, entry);
	priv->entry_addr = entry;

	/* Some drivers (e.g. self-booting images) opt out of this check */
	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
		return 0;

	list_for_each_entry(seg, &priv->segs, list) {
		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
			if (!image_relocated)
				return 0;
			else if (seg->relocated)
				return 0;
		}
	}
	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
	pil_dump_segs(priv);
	return -EADDRNOTAVAIL;
}
438
/*
 * Allocate (at most once) the DMA region backing all relocatable segments
 * of [min_addr, max_addr). The allocation is padded to a 1M (or 4M, for
 * large alignments) boundary and reused across reboots of the same image.
 */
static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
				phys_addr_t max_addr, size_t align)
{
	void *region;
	size_t size = max_addr - min_addr;
	size_t aligned_size;

	/* Don't reallocate due to fragmentation concerns, just sanity check */
	if (priv->region) {
		if (WARN(priv->region_end - priv->region_start < size,
			"Can't reuse PIL memory, too small\n"))
			return -ENOMEM;
		return 0;
	}

	if (align > SZ_4M)
		aligned_size = ALIGN(size, SZ_4M);
	else
		aligned_size = ALIGN(size, SZ_1M);

	/* Skip zeroing/mapping: the loader fills or clears the region itself */
	priv->desc->attrs = 0;
	priv->desc->attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;

	region = dma_alloc_attrs(priv->desc->dev, aligned_size,
				&priv->region_start, GFP_KERNEL,
				priv->desc->attrs);

	if (region == NULL) {
		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
					size);
		priv->region_start = 0;
		priv->region_end = 0;
		return -ENOMEM;
	}

	priv->region = region;
	priv->region_end = priv->region_start + size;
	priv->base_addr = min_addr;
	priv->region_size = aligned_size;

	return 0;
}
481
/*
 * Compute the image's physical footprint from its loadable program headers
 * and establish the load region: a fresh DMA allocation for relocatable
 * images, or the fixed [min, max) span for non-relocatable ones. The
 * resulting range is published to the imem load-info table if present.
 */
static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	const struct elf32_phdr *phdr;
	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
	size_t align = 0;
	int i, ret = 0;
	bool relocatable = false;

	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
	max_addr_n = max_addr_r = 0;

	/* Find the image limits */
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		start = phdr->p_paddr;
		end = start + phdr->p_memsz;

		if (segment_is_relocatable(phdr)) {
			min_addr_r = min(min_addr_r, start);
			max_addr_r = max(max_addr_r, end);
			/*
			 * Lowest relocatable segment dictates alignment of
			 * relocatable region
			 */
			if (min_addr_r == start)
				align = phdr->p_align;
			relocatable = true;
		} else {
			min_addr_n = min(min_addr_n, start);
			max_addr_n = max(max_addr_n, end);
		}

	}

	/*
	 * Align the max address to the next 4K boundary to satisfy iommus and
	 * XPUs that operate on 4K chunks.
	 */
	max_addr_n = ALIGN(max_addr_n, SZ_4K);
	max_addr_r = ALIGN(max_addr_r, SZ_4K);

	if (relocatable) {
		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
	} else {
		priv->region_start = min_addr_n;
		priv->region_end = max_addr_n;
		priv->base_addr = min_addr_n;
	}

	/* Publish the load range for post-mortem tools */
	if (priv->info) {
		__iowrite32_copy(&priv->info->start, &priv->region_start,
				 sizeof(priv->region_start) / 4);
		writel_relaxed(priv->region_end - priv->region_start,
				&priv->info->size);
	}

	return ret;
}
543
544static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
545{
546 int ret = 0;
547 struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
548 struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);
549
550 if (seg_a->paddr < seg_b->paddr)
551 ret = -1;
552 else if (seg_a->paddr > seg_b->paddr)
553 ret = 1;
554
555 return ret;
556}
557
/*
 * Establish the load region, create a pil_seg for every loadable program
 * header, sort the segment list by physical address, and validate the
 * image entry point against the resulting map.
 */
static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
{
	struct pil_priv *priv = desc->priv;
	const struct elf32_phdr *phdr;
	struct pil_seg *seg;
	int i, ret;

	ret = pil_setup_region(priv, mdt);
	if (ret)
		return ret;


	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
							&priv->region_end);

	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		seg = pil_init_seg(desc, phdr, i);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		list_add_tail(&seg->list, &priv->segs);
	}
	list_sort(NULL, &priv->segs, pil_cmp_seg);

	return pil_init_entry_addr(priv, mdt);
}
588
Puja Gupta7c187e82017-02-06 14:33:19 -0800589struct pil_map_fw_info {
590 void *region;
591 unsigned long attrs;
592 phys_addr_t base_addr;
593 struct device *dev;
594};
595
Kyle Yane45fa022016-08-29 11:40:26 -0700596static void pil_release_mmap(struct pil_desc *desc)
597{
598 struct pil_priv *priv = desc->priv;
599 struct pil_seg *p, *tmp;
600 u64 zero = 0ULL;
Gaurav Kohliebb42832017-02-21 12:08:50 +0530601
602 if (priv->info) {
603 __iowrite32_copy(&priv->info->start, &zero,
604 sizeof(zero) / 4);
605 writel_relaxed(0, &priv->info->size);
606 }
607
608 list_for_each_entry_safe(p, tmp, &priv->segs, list) {
609 list_del(&p->list);
610 kfree(p);
611 }
612}
613
/* Zero the whole load region so stale/unauthorized ELF data is destroyed. */
static void pil_clear_segment(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;
	u8 __iomem *buf;

	struct pil_map_fw_info map_fw_info = {
		.attrs = desc->attrs,
		.region = priv->region,
		.base_addr = priv->region_start,
		.dev = desc->dev,
	};

	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;

	/* Clear memory so that unauthorized ELF code is not left behind */
	buf = desc->map_fw_mem(priv->region_start, (priv->region_end -
					priv->region_start), map_data);
	pil_memset_io(buf, 0, (priv->region_end - priv->region_start));
	desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
							map_data);

}
636
/* Chunk size used when zeroing trailing segment memory in pil_load_seg() */
#define IOMAP_SIZE	SZ_1M

/* Default map callback: remap part of the DMA load region for CPU access. */
static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
{
	struct pil_map_fw_info *info = data;

	return dma_remap(info->dev, info->region, paddr, size,
					info->attrs);
}
646
/* Default unmap callback: undo a map_fw_mem() remap. */
static void unmap_fw_mem(void *vaddr, size_t size, void *data)
{
	struct pil_map_fw_info *info = data;

	dma_unremap(info->dev, vaddr, size);
}
653
654static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
655{
656 int ret = 0, count;
657 phys_addr_t paddr;
658 char fw_name[30];
659 int num = seg->num;
660 const struct firmware *fw = NULL;
Kyle Yan88156d12017-01-05 15:12:45 -0800661 void __iomem *firmware_buf;
Kyle Yane45fa022016-08-29 11:40:26 -0700662 struct pil_map_fw_info map_fw_info = {
663 .attrs = desc->attrs,
664 .region = desc->priv->region,
665 .base_addr = desc->priv->region_start,
666 .dev = desc->dev,
667 };
668 void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
669
670 if (seg->filesz) {
671 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
672 desc->fw_name, num);
Kyle Yan88156d12017-01-05 15:12:45 -0800673 firmware_buf = desc->map_fw_mem(seg->paddr, seg->filesz,
674 map_data);
675 if (!firmware_buf) {
676 pil_err(desc, "Failed to map memory for firmware buffer\n");
677 return -ENOMEM;
678 }
679
Kyle Yane45fa022016-08-29 11:40:26 -0700680 ret = request_firmware_into_buf(&fw, fw_name, desc->dev,
Kyle Yan88156d12017-01-05 15:12:45 -0800681 firmware_buf, seg->filesz);
682 desc->unmap_fw_mem(firmware_buf, seg->filesz, map_data);
683
684 if (ret) {
Kyle Yane45fa022016-08-29 11:40:26 -0700685 pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
686 fw_name, ret);
687 return ret;
688 }
689
Kyle Yan88156d12017-01-05 15:12:45 -0800690 if (fw->size != seg->filesz) {
Kyle Yane45fa022016-08-29 11:40:26 -0700691 pil_err(desc, "Blob size %u doesn't match %lu\n",
692 ret, seg->filesz);
693 return -EPERM;
694 }
Kyle Yane45fa022016-08-29 11:40:26 -0700695 }
696
697 /* Zero out trailing memory */
698 paddr = seg->paddr + seg->filesz;
699 count = seg->sz - seg->filesz;
700 while (count > 0) {
701 int size;
702 u8 __iomem *buf;
703
704 size = min_t(size_t, IOMAP_SIZE, count);
705 buf = desc->map_fw_mem(paddr, size, map_data);
706 if (!buf) {
707 pil_err(desc, "Failed to map memory\n");
708 return -ENOMEM;
709 }
710 pil_memset_io(buf, 0, size);
711
712 desc->unmap_fw_mem(buf, size, map_data);
713
714 count -= size;
715 paddr += size;
716 }
717
718 if (desc->ops->verify_blob) {
719 ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
720 if (ret)
721 pil_err(desc, "Blob%u failed verification(rc:%d)\n",
722 num, ret);
723 }
724
725 return ret;
726}
727
/*
 * Read PIL properties from the device tree: the optional
 * "qcom,mem-protect-id" (subsystem VMID) and the optional
 * "qcom,gpio-proxy-unvote" GPIO, which is converted to the proxy
 * unvote IRQ. Returns 0 or a negative errno.
 */
static int pil_parse_devicetree(struct pil_desc *desc)
{
	struct device_node *ofnode = desc->dev->of_node;
	int clk_ready = 0;

	if (!ofnode)
		return -EINVAL;

	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
					&desc->subsys_vmid))
		pr_debug("Unable to read the addr-protect-id for %s\n",
					desc->name);

	if (desc->ops->proxy_unvote && of_find_property(ofnode,
					"qcom,gpio-proxy-unvote",
					NULL)) {
		clk_ready = of_get_named_gpio(ofnode,
				"qcom,gpio-proxy-unvote", 0);

		if (clk_ready < 0) {
			dev_dbg(desc->dev,
				"[%s]: Error getting proxy unvoting gpio\n",
				desc->name);
			return clk_ready;
		}

		clk_ready = gpio_to_irq(clk_ready);
		if (clk_ready < 0) {
			dev_err(desc->dev,
				"[%s]: Error getting proxy unvote IRQ\n",
				desc->name);
			return clk_ready;
		}
	}
	desc->proxy_unvote_irq = clk_ready;
	return 0;
}
765
/* Synchronize request_firmware() with suspend */
static DECLARE_RWSEM(pil_pm_rwsem);

/**
 * pil_boot() - Load a peripheral image into memory and boot it
 * @desc: descriptor from pil_desc_init()
 *
 * Fetches <fw_name>.mdt, validates the ELF header, maps the load region,
 * takes proxy votes, hands the region to the subsystem (secure images),
 * loads every segment and finally authenticates and releases the
 * processor from reset. All error paths unwind via the goto ladder.
 *
 * Returns 0 on success or -ERROR on failure.
 */
int pil_boot(struct pil_desc *desc)
{
	int ret;
	char fw_name[30];
	const struct pil_mdt *mdt;
	const struct elf32_hdr *ehdr;
	struct pil_seg *seg;
	const struct firmware *fw;
	struct pil_priv *priv = desc->priv;
	bool mem_protect = false;
	bool hyp_assign = false;

	if (desc->shutdown_fail)
		pil_err(desc, "Subsystem shutdown failed previously!\n");

	/* Reinitialize for new image */
	pil_release_mmap(desc);

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
	ret = request_firmware(&fw, fw_name, desc->dev);
	if (ret) {
		pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
		goto out;
	}

	/* Validate the metadata before trusting any of its fields */
	if (fw->size < sizeof(*ehdr)) {
		pil_err(desc, "Not big enough to be an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	mdt = (const struct pil_mdt *)fw->data;
	ehdr = &mdt->hdr;

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pil_err(desc, "Not an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		pil_err(desc, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		pil_err(desc, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil_init_mmap(desc, mdt);
	if (ret)
		goto release_fw;

	desc->priv->unvoted_flag = 0;
	ret = pil_proxy_vote(desc);
	if (ret) {
		pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
		goto release_fw;
	}

	trace_pil_event("before_init_image", desc);
	if (desc->ops->init_image)
		ret = desc->ops->init_image(desc, fw->data, fw->size);
	if (ret) {
		pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
		goto err_boot;
	}

	trace_pil_event("before_mem_setup", desc);
	if (desc->ops->mem_setup)
		ret = desc->ops->mem_setup(desc, priv->region_start,
				priv->region_end - priv->region_start);
	if (ret) {
		pil_err(desc, "Memory setup error(rc:%d)\n", ret);
		goto err_deinit_image;
	}

	if (desc->subsys_vmid > 0) {
		/**
		 * In case of modem ssr, we need to assign memory back to linux.
		 * This is not true after cold boot since linux already owns it.
		 * Also for secure boot devices, modem memory has to be released
		 * after MBA is booted
		 */
		trace_pil_event("before_assign_mem", desc);
		if (desc->modem_ssr) {
			ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
			if (ret)
				pil_err(desc, "Failed to assign to linux, ret- %d\n",
								ret);
		}
		ret = pil_assign_mem_to_subsys_and_linux(desc,
				priv->region_start,
				(priv->region_end - priv->region_start));
		if (ret) {
			pil_err(desc, "Failed to assign memory, ret - %d\n",
								ret);
			goto err_deinit_image;
		}
		hyp_assign = true;
	}

	trace_pil_event("before_load_seg", desc);
	list_for_each_entry(seg, &desc->priv->segs, list) {
		ret = pil_load_seg(desc, seg);
		if (ret)
			goto err_deinit_image;
	}

	/* Hand exclusive ownership of the loaded image to the subsystem */
	if (desc->subsys_vmid > 0) {
		trace_pil_event("before_reclaim_mem", desc);
		ret =  pil_reclaim_mem(desc, priv->region_start,
				(priv->region_end - priv->region_start),
				desc->subsys_vmid);
		if (ret) {
			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
							desc->name, ret);
			goto err_deinit_image;
		}
		hyp_assign = false;
	}

	trace_pil_event("before_auth_reset", desc);
	ret = desc->ops->auth_and_reset(desc);
	if (ret) {
		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
		goto err_auth_and_reset;
	}
	trace_pil_event("reset_done", desc);
	pil_info(desc, "Brought out of reset\n");
	desc->modem_ssr = false;
err_auth_and_reset:
	if (ret && desc->subsys_vmid > 0) {
		pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
		mem_protect = true;
	}
err_deinit_image:
	if (ret && desc->ops->deinit_image)
		desc->ops->deinit_image(desc);
err_boot:
	if (ret && desc->proxy_unvote_irq)
		disable_irq(desc->proxy_unvote_irq);
	pil_proxy_unvote(desc, ret);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	if (ret) {
		/* On failure, scrub and free the region so no image remains */
		if (priv->region) {
			if (desc->subsys_vmid > 0 && !mem_protect &&
					hyp_assign) {
				pil_reclaim_mem(desc, priv->region_start,
					(priv->region_end -
						priv->region_start),
					VMID_HLOS);
			}
			if (desc->clear_fw_region && priv->region_start)
				pil_clear_segment(desc);
			dma_free_attrs(desc->dev, priv->region_size,
					priv->region, priv->region_start,
					desc->attrs);
			priv->region = NULL;
		}
		pil_release_mmap(desc);
	}
	return ret;
}
EXPORT_SYMBOL(pil_boot);
948EXPORT_SYMBOL(pil_boot);
949
950/**
951 * pil_shutdown() - Shutdown a peripheral
952 * @desc: descriptor from pil_desc_init()
953 */
954void pil_shutdown(struct pil_desc *desc)
955{
956 struct pil_priv *priv = desc->priv;
957
958 if (desc->ops->shutdown) {
959 if (desc->ops->shutdown(desc))
960 desc->shutdown_fail = true;
961 else
962 desc->shutdown_fail = false;
963 }
964
965 if (desc->proxy_unvote_irq) {
966 disable_irq(desc->proxy_unvote_irq);
967 if (!desc->priv->unvoted_flag)
968 pil_proxy_unvote(desc, 1);
969 } else if (!proxy_timeout_ms)
970 pil_proxy_unvote(desc, 1);
971 else
972 flush_delayed_work(&priv->proxy);
973 desc->modem_ssr = true;
974}
975EXPORT_SYMBOL(pil_shutdown);
976
/**
 * pil_free_memory() - Free memory resources associated with a peripheral
 * @desc: descriptor from pil_desc_init()
 *
 * Returns secured memory to HLOS (panics if that fails) before freeing
 * the DMA load region.
 */
void pil_free_memory(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv->region) {
		if (desc->subsys_vmid > 0)
			pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
		dma_free_attrs(desc->dev, priv->region_size,
				priv->region, priv->region_start, desc->attrs);
		priv->region = NULL;
	}
}
EXPORT_SYMBOL(pil_free_memory);
995
/* Allocator for per-descriptor ids, bounded by PIL_NUM_DESC */
static DEFINE_IDA(pil_ida);

/* True when the imem debug cookie disabled PIL timeouts (see msm_pil_init) */
bool is_timeout_disabled(void)
{
	return disable_timeouts;
}
1002/**
1003 * pil_desc_init() - Initialize a pil descriptor
1004 * @desc: descriptor to initialize
1005 *
1006 * Initialize a pil descriptor for use by other pil functions. This function
1007 * must be called before calling pil_boot() or pil_shutdown().
1008 *
1009 * Returns 0 for success and -ERROR on failure.
1010 */
1011int pil_desc_init(struct pil_desc *desc)
1012{
1013 struct pil_priv *priv;
1014 int ret;
1015 void __iomem *addr;
1016 char buf[sizeof(priv->info->name)];
1017
1018 if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
1019 "Invalid proxy voting. Ignoring\n"))
1020 ((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
1021
1022 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1023 if (!priv)
1024 return -ENOMEM;
1025 desc->priv = priv;
1026 priv->desc = desc;
1027
1028 priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
1029 if (priv->id < 0)
1030 goto err;
1031
1032 if (pil_info_base) {
1033 addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
1034 priv->info = (struct pil_image_info __iomem *)addr;
1035
1036 strlcpy(buf, desc->name, sizeof(buf));
1037 __iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
1038 }
1039
1040 ret = pil_parse_devicetree(desc);
1041 if (ret)
1042 goto err_parse_dt;
1043
1044 /* Ignore users who don't make any sense */
1045 WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
1046 && !desc->proxy_timeout,
1047 "Invalid proxy unvote callback or a proxy timeout of 0 was specified or no proxy unvote IRQ was specified.\n");
1048
1049 if (desc->proxy_unvote_irq) {
1050 ret = request_threaded_irq(desc->proxy_unvote_irq,
1051 NULL,
1052 proxy_unvote_intr_handler,
1053 IRQF_ONESHOT | IRQF_TRIGGER_RISING,
1054 desc->name, desc);
1055 if (ret < 0) {
1056 dev_err(desc->dev,
1057 "Unable to request proxy unvote IRQ: %d\n",
1058 ret);
1059 goto err;
1060 }
1061 disable_irq(desc->proxy_unvote_irq);
1062 }
1063
1064 snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
1065 wakeup_source_init(&priv->ws, priv->wname);
1066 INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
1067 INIT_LIST_HEAD(&priv->segs);
1068
1069 /* Make sure mapping functions are set. */
1070 if (!desc->map_fw_mem)
1071 desc->map_fw_mem = map_fw_mem;
1072
1073 if (!desc->unmap_fw_mem)
1074 desc->unmap_fw_mem = unmap_fw_mem;
1075
1076 return 0;
1077err_parse_dt:
1078 ida_simple_remove(&pil_ida, priv->id);
1079err:
1080 kfree(priv);
1081 return ret;
1082}
1083EXPORT_SYMBOL(pil_desc_init);
1084
/**
 * pil_desc_release() - Release a pil descriptor
 * @desc: descriptor to free
 *
 * Releases the id, flushes any pending proxy unvote work and tears down
 * the wakeup source before freeing the private state.
 */
void pil_desc_release(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv) {
		ida_simple_remove(&pil_ida, priv->id);
		flush_delayed_work(&priv->proxy);
		wakeup_source_trash(&priv->ws);
	}
	desc->priv = NULL;
	kfree(priv);
}
EXPORT_SYMBOL(pil_desc_release);
1102
/*
 * PM notifier: take pil_pm_rwsem for writing across suspend so that no
 * pil_boot() (which holds it for reading around request_firmware()) can
 * race with system suspend.
 */
static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&pil_pm_rwsem);
		break;
	case PM_POST_SUSPEND:
		up_write(&pil_pm_rwsem);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block pil_pm_notifier = {
	.notifier_call = pil_pm_notify,
};
1119
/*
 * Map the imem PIL region, honor the debug cookie that disables PIL
 * timeouts, zero the load-info table, and register the PM notifier.
 * imem failures are non-fatal: PIL still works without the info table.
 */
static int __init msm_pil_init(void)
{
	struct device_node *np;
	struct resource res;
	int i;

	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
	if (!np) {
		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
		goto out;
	}
	if (of_address_to_resource(np, 0, &res)) {
		pr_warn("pil: address to resource on imem region failed\n");
		goto out;
	}
	pil_info_base = ioremap(res.start, resource_size(&res));
	if (!pil_info_base) {
		pr_warn("pil: could not map imem region\n");
		goto out;
	}
	/* 0x53444247 == ASCII "SDBG": debugger cookie disabling timeouts */
	if (__raw_readl(pil_info_base) == 0x53444247) {
		pr_info("pil: pil-imem set to disable pil timeouts\n");
		disable_timeouts = true;
	}
	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));

out:
	return register_pm_notifier(&pil_pm_notifier);
}
device_initcall(msm_pil_init);
1151
/* Module teardown: drop the PM notifier and unmap the imem info region. */
static void __exit msm_pil_exit(void)
{
	unregister_pm_notifier(&pil_pm_notifier);
	if (pil_info_base)
		iounmap(pil_info_base);
}
module_exit(msm_pil_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");