/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/wakelock.h>
#include <linux/err.h>
#include <linux/msm_ion.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/idr.h>

#include <asm/uaccess.h>
#include <asm/setup.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#include <mach/msm_iomap.h>

#include "peripheral-loader.h"
#include "ramdump.h"

#define pil_err(desc, fmt, ...) \
	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
#define pil_info(desc, fmt, ...) \
	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)

#define PIL_IMAGE_INFO_BASE (MSM_IMEM_BASE + 0x94c)

/**
 * proxy_timeout_ms - Override for proxy vote timeouts
 * -1: Use driver-specified timeout
 *  0: Hold proxy votes until shutdown
 * >0: Specify a custom timeout in ms
 */
static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);

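/*
 * Illustrative runtime override (sysfs path assumes the usual module name
 * derived from this file; adjust if it differs):
 *
 *	echo 0 > /sys/module/peripheral_loader/parameters/proxy_timeout_ms
 *
 * would hold proxy votes until shutdown instead of using each driver's
 * proxy_timeout.
 */
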
/**
 * struct pil_mdt - Representation of <name>.mdt file in memory
 * @hdr: ELF32 header
 * @phdr: ELF32 program headers
 */
struct pil_mdt {
	struct elf32_hdr hdr;
	struct elf32_phdr phdr[];
};

/**
 * struct pil_seg - memory map representing one segment
 * @list: links this segment into the descriptor's list of segments
 * @paddr: physical start address of segment
 * @sz: size of segment in memory
 * @filesz: size of segment on disk
 * @num: segment number
 * @relocated: true if segment is relocated, false otherwise
 *
 * Loosely based on an elf program header. Contains all necessary information
 * to load and initialize a segment of the image in memory.
 */
struct pil_seg {
	phys_addr_t paddr;
	unsigned long sz;
	unsigned long filesz;
	int num;
	struct list_head list;
	bool relocated;
};

/**
 * struct pil_image_info - information in IMEM about image and where it is loaded
 * @name: name of image (may or may not be NULL terminated)
 * @start: indicates physical address where image starts (little endian)
 * @size: size of image (little endian)
 */
struct pil_image_info {
	char name[8];
	__le64 start;
	__le32 size;
} __attribute__((__packed__));

/**
 * struct pil_priv - Private state for a pil_desc
 * @proxy: work item used to run the proxy unvoting routine
 * @wlock: wakelock to prevent suspend during pil_boot
 * @wname: name of @wlock
 * @desc: pointer to pil_desc this is private data for
 * @segs: list of segments sorted by physical address
 * @entry_addr: physical address the processor starts booting from
 * @base_addr: smallest start address among all segments that are relocatable
 * @region_start: address where relocatable region starts or lowest address
 * for non-relocatable images
 * @region_end: address where relocatable region ends or highest address for
 * non-relocatable images
 * @region: region allocated for relocatable images
 * @info: IMEM slot where the image's load address and size are published
 * @id: index of the IMEM slot allocated for this descriptor
 *
 * This struct contains data for a pil_desc that should not be exposed outside
 * of this file. This structure points to the descriptor and the descriptor
 * points to this structure so that PIL drivers can't access the private
 * data of a descriptor but this file can access both.
 */
struct pil_priv {
	struct delayed_work proxy;
	struct wake_lock wlock;
	char wname[32];
	struct pil_desc *desc;
	struct list_head segs;
	phys_addr_t entry_addr;
	phys_addr_t base_addr;
	phys_addr_t region_start;
	phys_addr_t region_end;
	struct ion_handle *region;
	struct pil_image_info __iomem *info;
	int id;
};

/**
 * pil_do_ramdump() - Ramdump an image
 * @desc: descriptor from pil_desc_init()
 * @ramdump_dev: ramdump device returned from create_ramdump_device()
 *
 * Calls the ramdump API with a list of segments generated from the addresses
 * that the descriptor corresponds to.
 */
int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;
	int count = 0, ret;
	struct ramdump_segment *ramdump_segs, *s;

	list_for_each_entry(seg, &priv->segs, list)
		count++;

	ramdump_segs = kmalloc_array(count, sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	s = ramdump_segs;
	list_for_each_entry(seg, &priv->segs, list) {
		s->address = seg->paddr;
		s->size = seg->sz;
		s++;
	}

	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
	kfree(ramdump_segs);

	return ret;
}
EXPORT_SYMBOL(pil_do_ramdump);

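/* ION client used to allocate the memory that relocatable images load into */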
static struct ion_client *ion;

/**
 * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
 * @desc: descriptor from pil_desc_init()
 *
 * Returns the physical address where the image boots at or 0 if unknown.
 */
phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
	return desc->priv ? desc->priv->entry_addr : 0;
}
EXPORT_SYMBOL(pil_get_entry_addr);

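/*
 * Delayed work that runs when a proxy vote's timeout expires: it drops the
 * proxy vote, releases the wakelock taken in pil_proxy_vote(), and puts the
 * module reference taken when the unvote was scheduled.
 */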
static void pil_proxy_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
	struct pil_desc *desc = priv->desc;

	desc->ops->proxy_unvote(desc);
	wake_unlock(&priv->wlock);
	module_put(desc->owner);
}

static int pil_proxy_vote(struct pil_desc *desc)
{
	int ret = 0;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->proxy_vote) {
		wake_lock(&priv->wlock);
		ret = desc->ops->proxy_vote(desc);
		if (ret)
			wake_unlock(&priv->wlock);
	}
	return ret;
}

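/*
 * Schedule the proxy unvote to run after the configured timeout. A module
 * reference is held until pil_proxy_work() runs so the owner can't be
 * unloaded while an unvote is pending; @immediate forces a zero timeout.
 */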
static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
{
	struct pil_priv *priv = desc->priv;
	unsigned long timeout;

	if (proxy_timeout_ms == 0 && !immediate)
		return;
	else if (proxy_timeout_ms > 0)
		timeout = proxy_timeout_ms;
	else
		timeout = desc->proxy_timeout;

	if (desc->ops->proxy_unvote) {
		if (WARN_ON(!try_module_get(desc->owner)))
			return;

		if (immediate)
			timeout = 0;
		schedule_delayed_work(&priv->proxy, msecs_to_jiffies(timeout));
	}
}

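/* Bit 27 of p_flags marks a program header as relocatable */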
static bool segment_is_relocatable(const struct elf32_phdr *p)
{
	return !!(p->p_flags & BIT(27));
}

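/*
 * Translate a link-time physical address into the address inside the
 * allocated relocatable region. For example (hypothetical values), with
 * base_addr 0x08000000 and region_start 0x88000000, address 0x08100000
 * maps to 0x88100000.
 */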
static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
{
	return addr - priv->base_addr + priv->region_start;
}

static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
				  const struct elf32_phdr *phdr, int num)
{
	bool reloc = segment_is_relocatable(phdr);
	const struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;

	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		pil_err(desc, "kernel memory would be overwritten [%#08lx, %#08lx)\n",
			(unsigned long)phdr->p_paddr,
			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return ERR_PTR(-EPERM);
	}

	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return ERR_PTR(-ENOMEM);
	seg->num = num;
	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
	seg->filesz = phdr->p_filesz;
	seg->sz = phdr->p_memsz;
	seg->relocated = reloc;
	INIT_LIST_HEAD(&seg->list);

	return seg;
}

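/*
 * Bits 24-26 of p_flags encode the segment type; type 0x2 denotes a hash
 * segment, which segment_is_loadable() excludes from loading.
 */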
#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))

static int segment_is_loadable(const struct elf32_phdr *p)
{
	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
		p->p_memsz;
}

static void pil_dump_segs(const struct pil_priv *priv)
{
	struct pil_seg *seg;

	list_for_each_entry(seg, &priv->segs, list) {
		pil_info(priv->desc, "%d: %#08zx %#08lx\n", seg->num,
				seg->paddr, seg->paddr + seg->sz);
	}
}

/*
 * Ensure the entry address lies within the image limits and, if the image is
 * relocatable, ensure it lies within a relocatable segment.
 */
static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	struct pil_seg *seg;
	phys_addr_t entry = mdt->hdr.e_entry;
	bool image_relocated = priv->region;

	if (image_relocated)
		entry = pil_reloc(priv, entry);
	priv->entry_addr = entry;

	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
		return 0;

	list_for_each_entry(seg, &priv->segs, list) {
		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
			if (!image_relocated)
				return 0;
			else if (seg->relocated)
				return 0;
		}
	}
	pil_err(priv->desc, "entry address %08zx not within range\n", entry);
	pil_dump_segs(priv);
	return -EADDRNOTAVAIL;
}

static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
				phys_addr_t max_addr, size_t align)
{
	struct ion_handle *region;
	int ret;
	unsigned int mask;
	size_t size = max_addr - min_addr;

	if (!ion) {
		WARN_ON_ONCE("No ION client, can't support relocation\n");
		return -ENOMEM;
	}

	/* Force alignment due to linker scripts not getting it right */
	if (align > SZ_1M) {
		mask = ION_HEAP(ION_PIL2_HEAP_ID);
		align = SZ_4M;
	} else {
		mask = ION_HEAP(ION_PIL1_HEAP_ID);
		align = SZ_1M;
	}

	region = ion_alloc(ion, size, align, mask, 0);
	if (IS_ERR(region)) {
		pil_err(priv->desc, "Failed to allocate relocatable region\n");
		return PTR_ERR(region);
	}

	ret = ion_phys(ion, region, (ion_phys_addr_t *)&priv->region_start,
			&size);
	if (ret) {
		ion_free(ion, region);
		return ret;
	}

	priv->region = region;
	priv->region_end = priv->region_start + size;
	priv->base_addr = min_addr;

	return 0;
}

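/*
 * Walk the program headers to find the image limits, allocate an ION region
 * for relocatable images (or record the static bounds for non-relocatable
 * ones), and publish the resulting start address and size to IMEM.
 */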
static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	const struct elf32_phdr *phdr;
	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
	size_t align = 0;
	int i, ret = 0;
	bool relocatable = false;

	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
	max_addr_n = max_addr_r = 0;

	/* Find the image limits */
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		start = phdr->p_paddr;
		end = start + phdr->p_memsz;

		if (segment_is_relocatable(phdr)) {
			min_addr_r = min(min_addr_r, start);
			max_addr_r = max(max_addr_r, end);
			/*
			 * Lowest relocatable segment dictates alignment of
			 * relocatable region
			 */
			if (min_addr_r == start)
				align = phdr->p_align;
			relocatable = true;
		} else {
			min_addr_n = min(min_addr_n, start);
			max_addr_n = max(max_addr_n, end);
		}
	}

	/*
	 * Align the max address to the next 4K boundary to satisfy iommus and
	 * XPUs that operate on 4K chunks.
	 */
	max_addr_n = ALIGN(max_addr_n, SZ_4K);
	max_addr_r = ALIGN(max_addr_r, SZ_4K);

	if (relocatable) {
		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
	} else {
		priv->region_start = min_addr_n;
		priv->region_end = max_addr_n;
		priv->base_addr = min_addr_n;
	}

	writeq(priv->region_start, &priv->info->start);
	writel_relaxed(priv->region_end - priv->region_start,
			&priv->info->size);

	return ret;
}

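/* list_sort() comparator: order segments by ascending physical address */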
static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
{
	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);

	return seg_a->paddr - seg_b->paddr;
}

static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
{
	struct pil_priv *priv = desc->priv;
	const struct elf32_phdr *phdr;
	struct pil_seg *seg;
	int i, ret;

	ret = pil_setup_region(priv, mdt);
	if (ret)
		return ret;

	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		seg = pil_init_seg(desc, phdr, i);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		list_add_tail(&seg->list, &priv->segs);
	}
	list_sort(NULL, &priv->segs, pil_cmp_seg);

	return pil_init_entry_addr(priv, mdt);
}

static void pil_release_mmap(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *p, *tmp;

	writeq(0, &priv->info->start);
	writel_relaxed(0, &priv->info->size);

	if (priv->region)
		ion_free(ion, priv->region);
	priv->region = NULL;
	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
		list_del(&p->list);
		kfree(p);
	}
}

#define IOMAP_SIZE SZ_4M

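/*
 * Copy one segment's firmware blob (<name>.bNN) into its physical
 * destination in IOMAP_SIZE chunks of ioremap()/memcpy(), then zero the
 * remainder of the segment (memsz beyond filesz).
 */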
static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
{
	int ret = 0, count;
	phys_addr_t paddr;
	char fw_name[30];
	const struct firmware *fw = NULL;
	const u8 *data;
	int num = seg->num;

	if (seg->filesz) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
				desc->name, num);
		ret = request_firmware(&fw, fw_name, desc->dev);
		if (ret) {
			pil_err(desc, "Failed to locate blob %s\n", fw_name);
			return ret;
		}

		if (fw->size != seg->filesz) {
			pil_err(desc, "Blob size %zu doesn't match %lu\n",
					fw->size, seg->filesz);
			ret = -EPERM;
			goto release_fw;
		}
	}

	/* Load the segment into memory */
	count = seg->filesz;
	paddr = seg->paddr;
	data = fw ? fw->data : NULL;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			ret = -ENOMEM;
			goto release_fw;
		}
		memcpy(buf, data, size);
		iounmap(buf);

		count -= size;
		paddr += size;
		data += size;
	}

	/* Zero out trailing memory */
	count = seg->sz - seg->filesz;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			ret = -ENOMEM;
			goto release_fw;
		}
		memset(buf, 0, size);
		iounmap(buf);

		count -= size;
		paddr += size;
	}

	if (desc->ops->verify_blob) {
		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
		if (ret)
			pil_err(desc, "Blob%u failed verification\n", num);
	}

release_fw:
	release_firmware(fw);
	return ret;
}

/* Synchronize request_firmware() with suspend */
static DECLARE_RWSEM(pil_pm_rwsem);

/**
 * pil_boot() - Load a peripheral image into memory and boot it
 * @desc: descriptor from pil_desc_init()
 *
 * Returns 0 on success or -ERROR on failure.
 */
int pil_boot(struct pil_desc *desc)
{
	int ret;
	char fw_name[30];
	const struct pil_mdt *mdt;
	const struct elf32_hdr *ehdr;
	struct pil_seg *seg;
	const struct firmware *fw;
	struct pil_priv *priv = desc->priv;

	/* Reinitialize for new image */
	pil_release_mmap(desc);

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->name);
	ret = request_firmware(&fw, fw_name, desc->dev);
	if (ret) {
		pil_err(desc, "Failed to locate %s\n", fw_name);
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		pil_err(desc, "Not big enough to be an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	mdt = (const struct pil_mdt *)fw->data;
	ehdr = &mdt->hdr;

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pil_err(desc, "Not an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		pil_err(desc, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		pil_err(desc, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil_init_mmap(desc, mdt);
	if (ret)
		goto release_fw;

	if (desc->ops->init_image)
		ret = desc->ops->init_image(desc, fw->data, fw->size);
	if (ret) {
		pil_err(desc, "Invalid firmware metadata\n");
		goto release_fw;
	}

	if (desc->ops->mem_setup)
		ret = desc->ops->mem_setup(desc, priv->region_start,
				priv->region_end - priv->region_start);
	if (ret) {
		pil_err(desc, "Memory setup error\n");
		goto release_fw;
	}

	list_for_each_entry(seg, &desc->priv->segs, list) {
		ret = pil_load_seg(desc, seg);
		if (ret)
			goto release_fw;
	}

	ret = pil_proxy_vote(desc);
	if (ret) {
		pil_err(desc, "Failed to proxy vote\n");
		goto release_fw;
	}

	ret = desc->ops->auth_and_reset(desc);
	if (ret) {
		pil_err(desc, "Failed to bring out of reset\n");
		goto err_boot;
	}
	pil_info(desc, "Brought out of reset\n");
err_boot:
	pil_proxy_unvote(desc, ret);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	if (ret)
		pil_release_mmap(desc);
	return ret;
}
EXPORT_SYMBOL(pil_boot);

/**
 * pil_shutdown() - Shutdown a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_shutdown(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (desc->ops->shutdown)
		desc->ops->shutdown(desc);
	if (proxy_timeout_ms == 0 && desc->ops->proxy_unvote)
		desc->ops->proxy_unvote(desc);
	else
		flush_delayed_work(&priv->proxy);
}
EXPORT_SYMBOL(pil_shutdown);

static DEFINE_IDA(pil_ida);

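/*
 * Typical call sequence for a PIL client driver (illustrative sketch only;
 * the my_* callbacks, the platform device, and the "modem" name are
 * hypothetical placeholders):
 *
 *	static struct pil_reset_ops my_pil_ops = {
 *		.init_image	= my_init_image,
 *		.auth_and_reset	= my_auth_and_reset,
 *		.shutdown	= my_shutdown,
 *	};
 *
 *	desc->name = "modem";
 *	desc->dev = &pdev->dev;
 *	desc->ops = &my_pil_ops;
 *	desc->owner = THIS_MODULE;
 *	ret = pil_desc_init(desc);
 *	...
 *	ret = pil_boot(desc);
 *	...
 *	pil_shutdown(desc);
 *	pil_desc_release(desc);
 */
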
/**
 * pil_desc_init() - Initialize a pil descriptor
 * @desc: descriptor to initialize
 *
 * Initialize a pil descriptor for use by other pil functions. This function
 * must be called before calling pil_boot() or pil_shutdown().
 *
 * Returns 0 for success and -ERROR on failure.
 */
int pil_desc_init(struct pil_desc *desc)
{
	struct pil_priv *priv;
	int id;
	void __iomem *addr;
	char buf[sizeof(priv->info->name)];

	/* Ignore users who don't make any sense */
	WARN(desc->ops->proxy_unvote && !desc->proxy_timeout,
			"A proxy timeout of 0 was specified.\n");
	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
				"Invalid proxy voting. Ignoring\n"))
		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	desc->priv = priv;
	priv->desc = desc;

	priv->id = id = ida_simple_get(&pil_ida, 0, 10, GFP_KERNEL);
	if (id < 0) {
		desc->priv = NULL;
		kfree(priv);
		return id;
	}
	addr = PIL_IMAGE_INFO_BASE + sizeof(struct pil_image_info) * id;
	priv->info = (struct pil_image_info __iomem *)addr;

	strncpy(buf, desc->name, sizeof(buf));
	__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);

	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
	wake_lock_init(&priv->wlock, WAKE_LOCK_SUSPEND, priv->wname);
	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_work);
	INIT_LIST_HEAD(&priv->segs);

	return 0;
}
EXPORT_SYMBOL(pil_desc_init);

/**
 * pil_desc_release() - Release a pil descriptor
 * @desc: descriptor to free
 */
void pil_desc_release(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv) {
		ida_simple_remove(&pil_ida, priv->id);
		flush_delayed_work(&priv->proxy);
		wake_lock_destroy(&priv->wlock);
	}
	desc->priv = NULL;
	kfree(priv);
}
EXPORT_SYMBOL(pil_desc_release);

static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&pil_pm_rwsem);
		break;
	case PM_POST_SUSPEND:
		up_write(&pil_pm_rwsem);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block pil_pm_notifier = {
	.notifier_call = pil_pm_notify,
};

static int __init msm_pil_init(void)
{
	ion = msm_ion_client_create(UINT_MAX, "pil");
	if (IS_ERR(ion)) /* Can't support relocatable images */
		ion = NULL;
	return register_pm_notifier(&pil_pm_notifier);
}
device_initcall(msm_pil_init);

static void __exit msm_pil_exit(void)
{
	unregister_pm_notifier(&pil_pm_notifier);
	if (ion)
		ion_client_destroy(ion);
}
module_exit(msm_pil_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");