/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/wakelock.h>
#include <linux/err.h>
#include <linux/msm_ion.h>
#include <linux/list.h>
#include <linux/list_sort.h>

#include <asm/uaccess.h>
#include <asm/setup.h>

#include "peripheral-loader.h"

#define pil_err(desc, fmt, ...)						\
	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
#define pil_info(desc, fmt, ...)					\
	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)

/**
 * proxy_timeout_ms - Override for proxy vote timeouts
 * -1: Use driver-specified timeout
 * 0: Hold proxy votes until shutdown
 * >0: Specify a custom timeout in ms
 */
static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);

/**
 * struct pil_mdt - Representation of <name>.mdt file in memory
 * @hdr: ELF32 header
 * @phdr: ELF32 program headers
 */
struct pil_mdt {
	struct elf32_hdr hdr;
	struct elf32_phdr phdr[];
};

/**
 * struct pil_seg - memory map representing one segment
 * @list: list node linking this segment into the image's list of segments
 * @paddr: start address of segment
 * @sz: size of segment
 * @filesz: size of segment on disk
 * @num: segment number
 * @relocated: true if segment is relocated, false otherwise
 *
 * Loosely based on an elf program header. Contains all necessary information
 * to load and initialize a segment of the image in memory.
 */
struct pil_seg {
	phys_addr_t paddr;
	unsigned long sz;
	unsigned long filesz;
	int num;
	struct list_head list;
	bool relocated;
};

/**
 * struct pil_priv - Private state for a pil_desc
 * @proxy: work item used to run the proxy unvoting routine
 * @wlock: wakelock to prevent suspend during pil_boot
 * @wname: name of @wlock
 * @desc: pointer to pil_desc this is private data for
 * @segs: list of segments sorted by physical address
 * @entry_addr: physical address where the processor starts booting
 * @base_addr: smallest start address among all relocatable segments
 * @region_start: address where the relocatable region starts, or the lowest
 *		  address for non-relocatable images
 * @region_end: address where the relocatable region ends, or the highest
 *		 address for non-relocatable images
 * @region: ION handle for the region allocated for relocatable images
 *
 * This struct contains data for a pil_desc that should not be exposed outside
 * of this file. This structure points to the descriptor and the descriptor
 * points to this structure so that PIL drivers can't access the private
 * data of a descriptor but this file can access both.
 */
struct pil_priv {
	struct delayed_work proxy;
	struct wake_lock wlock;
	char wname[32];
	struct pil_desc *desc;
	struct list_head segs;
	phys_addr_t entry_addr;
	phys_addr_t base_addr;
	phys_addr_t region_start;
	phys_addr_t region_end;
	struct ion_handle *region;
};

static struct ion_client *ion;

/**
 * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
 * @desc: descriptor from pil_desc_init()
 *
 * Returns the physical address at which the image boots, or 0 if unknown.
 */
phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
	return desc->priv ? desc->priv->entry_addr : 0;
}
EXPORT_SYMBOL(pil_get_entry_addr);

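/*
 * Proxy votes hold the resources the peripheral needs while it boots
 * (whatever the driver's proxy_vote op enables). pil_boot() takes the vote
 * and pil_proxy_unvote() schedules this work item to drop it once the proxy
 * timeout expires, releasing the wakelock and the module reference taken
 * when the work was scheduled.
 */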
static void pil_proxy_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
	struct pil_desc *desc = priv->desc;

	desc->ops->proxy_unvote(desc);
	wake_unlock(&priv->wlock);
	module_put(desc->owner);
}

static int pil_proxy_vote(struct pil_desc *desc)
{
	int ret = 0;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->proxy_vote) {
		wake_lock(&priv->wlock);
		ret = desc->ops->proxy_vote(desc);
		if (ret)
			wake_unlock(&priv->wlock);
	}
	return ret;
}

static void pil_proxy_unvote(struct pil_desc *desc, unsigned long timeout)
{
	struct pil_priv *priv = desc->priv;

	if (proxy_timeout_ms >= 0)
		timeout = proxy_timeout_ms;

	if (timeout && desc->ops->proxy_unvote) {
		if (WARN_ON(!try_module_get(desc->owner)))
			return;
		schedule_delayed_work(&priv->proxy, msecs_to_jiffies(timeout));
	}
}

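/*
 * Bit 27 of p_flags is an image-specific flag (not a standard ELF one) that
 * marks a segment as relocatable.
 */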
static bool segment_is_relocatable(const struct elf32_phdr *p)
{
	return !!(p->p_flags & BIT(27));
}

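/*
 * Translate an address from the image's link-time address space into the
 * region that was actually allocated for it.
 */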
static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
{
	return addr - priv->base_addr + priv->region_start;
}

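/*
 * Build a pil_seg from one program header. Relocatable segments have their
 * load address translated into the allocated region; non-relocatable
 * segments are rejected if they would overwrite kernel memory.
 */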
static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
				  const struct elf32_phdr *phdr, int num)
{
	bool reloc = segment_is_relocatable(phdr);
	const struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;

	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		pil_err(desc, "kernel memory would be overwritten [%#08lx, %#08lx)\n",
				(unsigned long)phdr->p_paddr,
				(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return ERR_PTR(-EPERM);
	}

	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return ERR_PTR(-ENOMEM);
	seg->num = num;
	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
	seg->filesz = phdr->p_filesz;
	seg->sz = phdr->p_memsz;
	seg->relocated = reloc;
	INIT_LIST_HEAD(&seg->list);

	return seg;
}

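/*
 * Bits [26:24] of p_flags encode an image-specific segment type; type 2
 * marks a hash segment, which is skipped by segment_is_loadable().
 */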
#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))

static int segment_is_loadable(const struct elf32_phdr *p)
{
	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
		p->p_memsz;
}

static void pil_dump_segs(const struct pil_priv *priv)
{
	struct pil_seg *seg;

	list_for_each_entry(seg, &priv->segs, list) {
		pil_info(priv->desc, "%d: %#08llx %#08llx\n", seg->num,
				(unsigned long long)seg->paddr,
				(unsigned long long)(seg->paddr + seg->sz));
	}
}

/*
 * Ensure the entry address lies within the image limits and, if the image is
 * relocatable, that it lies within a relocatable segment.
 */
static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	struct pil_seg *seg;
	phys_addr_t entry = mdt->hdr.e_entry;
	bool image_relocated = priv->region;

	if (image_relocated)
		entry = pil_reloc(priv, entry);
	priv->entry_addr = entry;

	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
		return 0;

	list_for_each_entry(seg, &priv->segs, list) {
		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
			if (!image_relocated)
				return 0;
			else if (seg->relocated)
				return 0;
		}
	}
	pil_err(priv->desc, "entry address %#08llx not within range\n",
			(unsigned long long)entry);
	pil_dump_segs(priv);
	return -EADDRNOTAVAIL;
}

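/*
 * Allocate a physically contiguous region from one of the PIL ION heaps,
 * sized to cover every relocatable segment. The relocated image is loaded
 * here instead of at its link-time addresses.
 */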
static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
				phys_addr_t max_addr, size_t align)
{
	struct ion_handle *region;
	int ret;
	unsigned int mask;
	size_t size = round_up(max_addr - min_addr, align);

	if (!ion) {
		WARN_ONCE(1, "No ION client, can't support relocation\n");
		return -ENOMEM;
	}

	/* Force alignment due to linker scripts not getting it right */
	if (align > SZ_1M) {
		mask = ION_HEAP(ION_PIL2_HEAP_ID);
		align = SZ_4M;
	} else {
		mask = ION_HEAP(ION_PIL1_HEAP_ID);
		align = SZ_1M;
	}

	region = ion_alloc(ion, size, align, mask, 0);
	if (IS_ERR(region)) {
		pil_err(priv->desc, "Failed to allocate relocatable region\n");
		return PTR_ERR(region);
	}

	ret = ion_phys(ion, region, (ion_phys_addr_t *)&priv->region_start,
			&size);
	if (ret) {
		ion_free(ion, region);
		return ret;
	}

	priv->region = region;
	priv->region_end = priv->region_start + size;
	priv->base_addr = min_addr;

	return 0;
}

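/*
 * Scan the program headers to find the image limits, tracking relocatable
 * and non-relocatable segments separately. Relocatable images get an ION
 * region sized to those limits; otherwise the region simply mirrors the
 * fixed load addresses.
 */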
static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	const struct elf32_phdr *phdr;
	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
	size_t align = 0;
	int i, ret = 0;
	bool relocatable = false;

	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
	max_addr_n = max_addr_r = 0;

	/* Find the image limits */
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		start = phdr->p_paddr;
		end = start + phdr->p_memsz;

		if (segment_is_relocatable(phdr)) {
			min_addr_r = min(min_addr_r, start);
			max_addr_r = max(max_addr_r, end);
			/*
			 * Lowest relocatable segment dictates alignment of
			 * relocatable region
			 */
			if (min_addr_r == start)
				align = phdr->p_align;
			relocatable = true;
		} else {
			min_addr_n = min(min_addr_n, start);
			max_addr_n = max(max_addr_n, end);
		}
	}

	if (relocatable) {
		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
	} else {
		priv->region_start = min_addr_n;
		priv->region_end = max_addr_n;
		priv->base_addr = min_addr_n;
	}

	return ret;
}

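/* list_sort() comparator: order segments by ascending physical address */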
static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
{
	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);

	return seg_a->paddr - seg_b->paddr;
}

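/*
 * Set up the load region, parse the .mdt program headers into a sorted list
 * of pil_seg structures and validate the entry point against them.
 */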
static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
{
	struct pil_priv *priv = desc->priv;
	const struct elf32_phdr *phdr;
	struct pil_seg *seg;
	int i, ret;

	ret = pil_setup_region(priv, mdt);
	if (ret)
		return ret;

	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		seg = pil_init_seg(desc, phdr, i);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		list_add_tail(&seg->list, &priv->segs);
	}
	list_sort(NULL, &priv->segs, pil_cmp_seg);

	return pil_init_entry_addr(priv, mdt);
}

static void pil_release_mmap(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *p, *tmp;

	if (priv->region) {
		ion_free(ion, priv->region);
		priv->region = NULL;
	}
	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
		list_del(&p->list);
		kfree(p);
	}
}

#define IOMAP_SIZE SZ_4M

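/*
 * Stream a segment's firmware blob into its physical destination in
 * IOMAP_SIZE chunks, using a temporary ioremap() for each chunk so only a
 * bounded amount of virtual address space is mapped at a time. Memory beyond
 * the blob (memsz > filesz) is zeroed.
 */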
static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
{
	int ret = 0, count;
	phys_addr_t paddr;
	char fw_name[30];
	const struct firmware *fw = NULL;
	const u8 *data;
	int num = seg->num;

	if (seg->filesz) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
				desc->name, num);
		ret = request_firmware(&fw, fw_name, desc->dev);
		if (ret) {
			pil_err(desc, "Failed to locate blob %s\n", fw_name);
			return ret;
		}

		if (fw->size != seg->filesz) {
			pil_err(desc, "Blob size %zu doesn't match %lu\n",
					fw->size, seg->filesz);
			ret = -EPERM;
			goto release_fw;
		}
	}

	/* Load the segment into memory */
	count = seg->filesz;
	paddr = seg->paddr;
	data = fw ? fw->data : NULL;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			ret = -ENOMEM;
			goto release_fw;
		}
		memcpy(buf, data, size);
		iounmap(buf);

		count -= size;
		paddr += size;
		data += size;
	}

	/* Zero out trailing memory */
	count = seg->sz - seg->filesz;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			ret = -ENOMEM;
			goto release_fw;
		}
		memset(buf, 0, size);
		iounmap(buf);

		count -= size;
		paddr += size;
	}

	if (desc->ops->verify_blob) {
		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
		if (ret)
			pil_err(desc, "Blob%u failed verification\n", num);
	}

release_fw:
	release_firmware(fw);
	return ret;
}

/* Synchronize request_firmware() with suspend */
static DECLARE_RWSEM(pil_pm_rwsem);

/**
 * pil_boot() - Load a peripheral image into memory and boot it
 * @desc: descriptor from pil_desc_init()
 *
 * Returns 0 on success or -ERROR on failure.
 */
int pil_boot(struct pil_desc *desc)
{
	int ret;
	char fw_name[30];
	const struct pil_mdt *mdt;
	const struct elf32_hdr *ehdr;
	struct pil_seg *seg;
	const struct firmware *fw;
	unsigned long proxy_timeout = desc->proxy_timeout;
	struct pil_priv *priv = desc->priv;

	/* Reinitialize for new image */
	pil_release_mmap(desc);

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->name);
	ret = request_firmware(&fw, fw_name, desc->dev);
	if (ret) {
		pil_err(desc, "Failed to locate %s\n", fw_name);
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		pil_err(desc, "Not big enough to be an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	mdt = (const struct pil_mdt *)fw->data;
	ehdr = &mdt->hdr;

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pil_err(desc, "Not an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		pil_err(desc, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		pil_err(desc, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil_init_mmap(desc, mdt);
	if (ret)
		goto release_fw;

	if (desc->ops->init_image)
		ret = desc->ops->init_image(desc, fw->data, fw->size);
	if (ret) {
		pil_err(desc, "Invalid firmware metadata\n");
		goto release_fw;
	}

	if (desc->ops->mem_setup)
		ret = desc->ops->mem_setup(desc, priv->region_start,
				priv->region_end - priv->region_start);
	if (ret) {
		pil_err(desc, "Memory setup error\n");
		goto release_fw;
	}

	list_for_each_entry(seg, &desc->priv->segs, list) {
		ret = pil_load_seg(desc, seg);
		if (ret)
			goto release_fw;
	}

	ret = pil_proxy_vote(desc);
	if (ret) {
		pil_err(desc, "Failed to proxy vote\n");
		goto release_fw;
	}

	ret = desc->ops->auth_and_reset(desc);
	if (ret) {
		pil_err(desc, "Failed to bring out of reset\n");
		proxy_timeout = 0; /* Remove proxy vote immediately on error */
		goto err_boot;
	}
	pil_info(desc, "Brought out of reset\n");
err_boot:
	pil_proxy_unvote(desc, proxy_timeout);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	if (ret)
		pil_release_mmap(desc);
	return ret;
}
EXPORT_SYMBOL(pil_boot);

/**
 * pil_shutdown() - Shutdown a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_shutdown(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	desc->ops->shutdown(desc);
	if (proxy_timeout_ms == 0 && desc->ops->proxy_unvote)
		desc->ops->proxy_unvote(desc);
	else
		flush_delayed_work(&priv->proxy);
}
EXPORT_SYMBOL(pil_shutdown);

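/*
 * Typical call sequence for a PIL client driver (illustrative sketch only;
 * the "foo" names below are hypothetical and the pil_desc fields shown are
 * limited to the ones this file itself uses):
 *
 *	static struct pil_desc foo_desc = {
 *		.name		= "foo",
 *		.dev		= &pdev->dev,
 *		.ops		= &foo_pil_ops,
 *		.owner		= THIS_MODULE,
 *		.proxy_timeout	= 10000,
 *	};
 *
 *	ret = pil_desc_init(&foo_desc);
 *	if (!ret)
 *		ret = pil_boot(&foo_desc);
 *	...
 *	pil_shutdown(&foo_desc);
 *	pil_desc_release(&foo_desc);
 */
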
/**
 * pil_desc_init() - Initialize a pil descriptor
 * @desc: descriptor to initialize
 *
 * Initialize a pil descriptor for use by other pil functions. This function
 * must be called before calling pil_boot() or pil_shutdown().
 *
 * Returns 0 for success and -ERROR on failure.
 */
int pil_desc_init(struct pil_desc *desc)
{
	struct pil_priv *priv;

	/* Ignore users who don't make any sense */
	WARN(desc->ops->proxy_unvote && !desc->proxy_timeout,
			"A proxy timeout of 0 was specified.\n");
	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
				"Invalid proxy voting. Ignoring\n"))
		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	desc->priv = priv;
	priv->desc = desc;

	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
	wake_lock_init(&priv->wlock, WAKE_LOCK_SUSPEND, priv->wname);
	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_work);
	INIT_LIST_HEAD(&priv->segs);

	return 0;
}
EXPORT_SYMBOL(pil_desc_init);

/**
 * pil_desc_release() - Release a pil descriptor
 * @desc: descriptor to free
 */
void pil_desc_release(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv) {
		flush_delayed_work(&priv->proxy);
		wake_lock_destroy(&priv->wlock);
	}
	desc->priv = NULL;
	kfree(priv);
}
EXPORT_SYMBOL(pil_desc_release);

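/*
 * Take pil_pm_rwsem for writing across suspend so that pil_boot(), which
 * holds it for reading around its request_firmware() calls, cannot race
 * with system suspend while firmware is being loaded.
 */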
static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&pil_pm_rwsem);
		break;
	case PM_POST_SUSPEND:
		up_write(&pil_pm_rwsem);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block pil_pm_notifier = {
	.notifier_call = pil_pm_notify,
};

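/*
 * The ION client is optional: if it cannot be created, relocatable images
 * are not supported but PIL continues to work for fixed-address images.
 */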
static int __init msm_pil_init(void)
{
	ion = msm_ion_client_create(UINT_MAX, "pil");
	if (IS_ERR(ion)) /* Can't support relocatable images */
		ion = NULL;
	return register_pm_notifier(&pil_pm_notifier);
}
device_initcall(msm_pil_init);

static void __exit msm_pil_exit(void)
{
	unregister_pm_notifier(&pil_pm_notifier);
	if (ion)
		ion_client_destroy(ion);
}
module_exit(msm_pil_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");