/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/wakelock.h>
#include <linux/err.h>
#include <linux/msm_ion.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>

#include <asm/uaccess.h>
#include <asm/setup.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#include <mach/msm_iomap.h>
#include <mach/ramdump.h>

#include "peripheral-loader.h"

#define pil_err(desc, fmt, ...) \
	dev_err((desc)->dev, "%s: " fmt, (desc)->name, ##__VA_ARGS__)
#define pil_info(desc, fmt, ...) \
	dev_info((desc)->dev, "%s: " fmt, (desc)->name, ##__VA_ARGS__)

#define PIL_IMAGE_INFO_BASE	(MSM_IMEM_BASE + 0x94c)

/**
 * proxy_timeout_ms - Override for proxy vote timeouts
 * -1: Use driver-specified timeout
 *  0: Hold proxy votes until shutdown
 * >0: Specify a custom timeout in ms
 */
static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);
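
/*
 * Illustrative only: because the parameter above is writable (S_IWUSR), the
 * timeout can also be overridden at runtime. Assuming this file is built in
 * under the name "peripheral_loader", the knob would appear as something
 * like:
 *
 *	echo 10000 > /sys/module/peripheral_loader/parameters/proxy_timeout_ms
 *
 * The exact sysfs path depends on how the object is named in the build.
 */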

/**
 * struct pil_mdt - Representation of <name>.mdt file in memory
 * @hdr: ELF32 header
 * @phdr: ELF32 program headers
 */
struct pil_mdt {
	struct elf32_hdr hdr;
	struct elf32_phdr phdr[];
};

/**
 * struct pil_seg - memory map representing one segment
 * @paddr: start address of segment
 * @sz: size of segment
 * @filesz: size of segment on disk
 * @num: segment number
 * @list: links this segment into the image's list of segments
 * @relocated: true if segment is relocated, false otherwise
 *
 * Loosely based on an ELF program header. Contains all necessary information
 * to load and initialize a segment of the image in memory.
 */
struct pil_seg {
	phys_addr_t paddr;
	unsigned long sz;
	unsigned long filesz;
	int num;
	struct list_head list;
	bool relocated;
};

/**
 * struct pil_image_info - information in IMEM about image and where it is loaded
 * @name: name of image (may or may not be NULL terminated)
 * @start: physical address at which the image starts (little endian)
 * @size: size of image (little endian)
 */
struct pil_image_info {
	char name[8];
	__le64 start;
	__le32 size;
} __attribute__((__packed__));
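
/*
 * A sketch of the resulting IMEM layout (this assumes the 20-byte packed
 * entry size implied by the struct above and the base defined earlier):
 *
 *	PIL_IMAGE_INFO_BASE + id * sizeof(struct pil_image_info)
 *	  +0x00  name[8]
 *	  +0x08  start (64-bit LE)
 *	  +0x10  size  (32-bit LE)
 *
 * Each pil_desc claims one slot, indexed by the id handed out in
 * pil_desc_init() below.
 */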

/**
 * struct pil_priv - Private state for a pil_desc
 * @proxy: work item used to run the proxy unvoting routine
 * @wlock: wakelock to prevent suspend during pil_boot
 * @wname: name of @wlock
 * @desc: pointer to pil_desc this is private data for
 * @segs: list of segments sorted by physical address
 * @entry_addr: physical address at which the processor starts booting
 * @base_addr: smallest start address among all segments that are relocatable
 * @region_start: address where relocatable region starts or lowest address
 * for non-relocatable images
 * @region_end: address where relocatable region ends or highest address for
 * non-relocatable images
 * @region: region allocated for relocatable images
 * @info: IMEM entry used to publish the load address and size of this image
 * @id: index of this image's slot in the IMEM image info table
 * @unvoted_flag: set once the proxy votes for the current boot have been
 * dropped
 *
 * This struct contains data for a pil_desc that should not be exposed outside
 * of this file. This structure points to the descriptor and the descriptor
 * points to this structure so that PIL drivers can't access the private
 * data of a descriptor but this file can access both.
 */
struct pil_priv {
	struct delayed_work proxy;
	struct wake_lock wlock;
	char wname[32];
	struct pil_desc *desc;
	struct list_head segs;
	phys_addr_t entry_addr;
	phys_addr_t base_addr;
	phys_addr_t region_start;
	phys_addr_t region_end;
	struct ion_handle *region;
	struct pil_image_info __iomem *info;
	int id;
	int unvoted_flag;
};

/**
 * pil_do_ramdump() - Ramdump an image
 * @desc: descriptor from pil_desc_init()
 * @ramdump_dev: ramdump device returned from create_ramdump_device()
 *
 * Calls the ramdump API with a list of segments generated from the addresses
 * that the descriptor corresponds to.
 */
int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;
	int count = 0, ret;
	struct ramdump_segment *ramdump_segs, *s;

	list_for_each_entry(seg, &priv->segs, list)
		count++;

	ramdump_segs = kmalloc_array(count, sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	s = ramdump_segs;
	list_for_each_entry(seg, &priv->segs, list) {
		s->address = seg->paddr;
		s->size = seg->sz;
		s++;
	}

	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
	kfree(ramdump_segs);

	return ret;
}
EXPORT_SYMBOL(pil_do_ramdump);
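
/*
 * A minimal usage sketch (illustrative, not part of this file's API): a
 * subsystem-restart driver that already holds a ramdump device handle from
 * create_ramdump_device() would collect dumps after a crash with:
 *
 *	if (pil_do_ramdump(drv->pil_desc, drv->ramdump_dev))
 *		dev_err(drv->dev, "ramdump collection failed\n");
 *
 * where drv is that driver's own private structure.
 */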

static struct ion_client *ion;

/**
 * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
 * @desc: descriptor from pil_desc_init()
 *
 * Returns the physical address at which the image boots, or 0 if unknown.
 */
phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
	return desc->priv ? desc->priv->entry_addr : 0;
}
EXPORT_SYMBOL(pil_get_entry_addr);

static void __pil_proxy_unvote(struct pil_priv *priv)
{
	struct pil_desc *desc = priv->desc;

	desc->ops->proxy_unvote(desc);
	wake_unlock(&priv->wlock);
	module_put(desc->owner);
}

static void pil_proxy_unvote_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);

	__pil_proxy_unvote(priv);
}

static int pil_proxy_vote(struct pil_desc *desc)
{
	int ret = 0;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->proxy_vote) {
		wake_lock(&priv->wlock);
		ret = desc->ops->proxy_vote(desc);
		if (ret)
			wake_unlock(&priv->wlock);
	}

	if (desc->proxy_unvote_irq)
		enable_irq(desc->proxy_unvote_irq);

	return ret;
}

static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
{
	struct pil_priv *priv = desc->priv;
	unsigned long timeout;

	if (proxy_timeout_ms == 0 && !immediate)
		return;
	else if (proxy_timeout_ms > 0)
		timeout = proxy_timeout_ms;
	else
		timeout = desc->proxy_timeout;

	if (desc->ops->proxy_unvote) {
		if (WARN_ON(!try_module_get(desc->owner)))
			return;

		if (immediate)
			timeout = 0;

		if (!desc->proxy_unvote_irq || immediate)
			schedule_delayed_work(&priv->proxy,
					      msecs_to_jiffies(timeout));
	}
}

static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
{
	struct pil_desc *desc = dev_id;
	struct pil_priv *priv = desc->priv;

	pil_info(desc, "Power/Clock ready interrupt received\n");
	if (!priv->unvoted_flag) {
		priv->unvoted_flag = 1;
		__pil_proxy_unvote(priv);
	}

	return IRQ_HANDLED;
}

static bool segment_is_relocatable(const struct elf32_phdr *p)
{
	return !!(p->p_flags & BIT(27));
}

static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
{
	return addr - priv->base_addr + priv->region_start;
}
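
/*
 * Worked example (the numbers are made up): if the ELF links its relocatable
 * segments starting at base_addr = 0x08000000 and the region allocated at
 * runtime begins at region_start = 0x7f000000, then a segment linked at
 * 0x08100000 is placed at:
 *
 *	pil_reloc(priv, 0x08100000) = 0x08100000 - 0x08000000 + 0x7f000000
 *	                            = 0x7f100000
 */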

static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
				  const struct elf32_phdr *phdr, int num)
{
	bool reloc = segment_is_relocatable(phdr);
	const struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;

	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		pil_err(desc, "kernel memory would be overwritten [%#08lx, %#08lx)\n",
			(unsigned long)phdr->p_paddr,
			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return ERR_PTR(-EPERM);
	}

	if (phdr->p_filesz > phdr->p_memsz) {
		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
			num, phdr->p_filesz, phdr->p_memsz);
		return ERR_PTR(-EINVAL);
	}

	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return ERR_PTR(-ENOMEM);
	seg->num = num;
	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
	seg->filesz = phdr->p_filesz;
	seg->sz = phdr->p_memsz;
	seg->relocated = reloc;
	INIT_LIST_HEAD(&seg->list);

	return seg;
}

#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
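
/*
 * Note on the p_flags tests above: bits 24-26 appear to carry a
 * Qualcomm-specific segment type (the value 0x2 marking a hash segment that
 * is never loaded) and bit 27 marks a segment as relocatable. This
 * interpretation is inferred from how the macros are used in this file; the
 * authoritative definition lives in the image-generation tooling.
 */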

static int segment_is_loadable(const struct elf32_phdr *p)
{
	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
		p->p_memsz;
}

static void pil_dump_segs(const struct pil_priv *priv)
{
	struct pil_seg *seg;
	phys_addr_t seg_h_paddr;

	list_for_each_entry(seg, &priv->segs, list) {
		seg_h_paddr = seg->paddr + seg->sz;
		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
			 &seg->paddr, &seg_h_paddr);
	}
}

/*
 * Ensure the entry address lies within the image limits and, if the image is
 * relocatable, ensure it lies within a relocatable segment.
 */
static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	struct pil_seg *seg;
	phys_addr_t entry = mdt->hdr.e_entry;
	bool image_relocated = priv->region;

	if (image_relocated)
		entry = pil_reloc(priv, entry);
	priv->entry_addr = entry;

	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
		return 0;

	list_for_each_entry(seg, &priv->segs, list) {
		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
			if (!image_relocated)
				return 0;
			else if (seg->relocated)
				return 0;
		}
	}
	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
	pil_dump_segs(priv);
	return -EADDRNOTAVAIL;
}

static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
			    phys_addr_t max_addr, size_t align)
{
	struct ion_handle *region;
	int ret;
	unsigned int mask;
	size_t size = max_addr - min_addr;

	/* Don't reallocate due to fragmentation concerns, just sanity check */
	if (priv->region) {
		if (WARN(priv->region_end - priv->region_start < size,
			 "Can't reuse PIL memory, too small\n"))
			return -ENOMEM;
		return 0;
	}

	if (!ion) {
		WARN_ONCE(1, "No ION client, can't support relocation\n");
		return -ENOMEM;
	}

	/* Force alignment due to linker scripts not getting it right */
	if (align > SZ_1M) {
		mask = ION_HEAP(ION_PIL2_HEAP_ID);
		align = SZ_4M;
	} else {
		mask = ION_HEAP(ION_PIL1_HEAP_ID);
		align = SZ_1M;
	}

	region = ion_alloc(ion, size, align, mask, 0);
	if (IS_ERR(region)) {
		pil_err(priv->desc, "Failed to allocate relocatable region\n");
		return PTR_ERR(region);
	}

	ret = ion_phys(ion, region, (ion_phys_addr_t *)&priv->region_start,
			&size);
	if (ret) {
		ion_free(ion, region);
		return ret;
	}

	priv->region = region;
	priv->region_end = priv->region_start + size;
	priv->base_addr = min_addr;

	return 0;
}

static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	const struct elf32_phdr *phdr;
	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
	size_t align = 0;
	int i, ret = 0;
	bool relocatable = false;

	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
	max_addr_n = max_addr_r = 0;

	/* Find the image limits */
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		start = phdr->p_paddr;
		end = start + phdr->p_memsz;

		if (segment_is_relocatable(phdr)) {
			min_addr_r = min(min_addr_r, start);
			max_addr_r = max(max_addr_r, end);
			/*
			 * Lowest relocatable segment dictates alignment of
			 * relocatable region
			 */
			if (min_addr_r == start)
				align = phdr->p_align;
			relocatable = true;
		} else {
			min_addr_n = min(min_addr_n, start);
			max_addr_n = max(max_addr_n, end);
		}
	}

	/*
	 * Align the max address to the next 4K boundary to satisfy iommus and
	 * XPUs that operate on 4K chunks.
	 */
	max_addr_n = ALIGN(max_addr_n, SZ_4K);
	max_addr_r = ALIGN(max_addr_r, SZ_4K);

	if (relocatable) {
		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
	} else {
		priv->region_start = min_addr_n;
		priv->region_end = max_addr_n;
		priv->base_addr = min_addr_n;
	}

	writeq(priv->region_start, &priv->info->start);
	writel_relaxed(priv->region_end - priv->region_start,
			&priv->info->size);

	return ret;
}

static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
{
	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);

	return seg_a->paddr - seg_b->paddr;
}

static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
{
	struct pil_priv *priv = desc->priv;
	const struct elf32_phdr *phdr;
	struct pil_seg *seg;
	int i, ret;

	ret = pil_setup_region(priv, mdt);
	if (ret)
		return ret;

	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
		 &priv->region_end);

	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		seg = pil_init_seg(desc, phdr, i);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		list_add_tail(&seg->list, &priv->segs);
	}
	list_sort(NULL, &priv->segs, pil_cmp_seg);

	return pil_init_entry_addr(priv, mdt);
}

static void pil_release_mmap(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *p, *tmp;

	writeq(0, &priv->info->start);
	writel_relaxed(0, &priv->info->size);

	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
		list_del(&p->list);
		kfree(p);
	}
}

#define IOMAP_SIZE	SZ_4M

static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
{
	int ret = 0, count;
	phys_addr_t paddr;
	char fw_name[30];
	int num = seg->num;

	if (seg->filesz) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
			 desc->name, num);
		ret = request_firmware_direct(fw_name, desc->dev, seg->paddr,
					      seg->filesz);
		if (ret < 0) {
			pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
				fw_name);
			return ret;
		}

		if (ret != seg->filesz) {
			pil_err(desc, "Blob size %u doesn't match %lu\n",
				ret, seg->filesz);
			return -EPERM;
		}
		ret = 0;
	}

	/* Zero out trailing memory */
	paddr = seg->paddr + seg->filesz;
	count = seg->sz - seg->filesz;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = ioremap(paddr, size);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			return -ENOMEM;
		}
		memset(buf, 0, size);
		iounmap(buf);

		count -= size;
		paddr += size;
	}

	if (desc->ops->verify_blob) {
		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
		if (ret)
			pil_err(desc, "Blob%u failed verification\n", num);
	}

	return ret;
}

static void pil_parse_devicetree(struct pil_desc *desc)
{
	int clk_ready = 0;

	if (desc->ops->proxy_unvote &&
		of_find_property(desc->dev->of_node,
				"qcom,gpio-proxy-unvote",
				NULL)) {
		clk_ready = of_get_named_gpio(desc->dev->of_node,
				"qcom,gpio-proxy-unvote", 0);

		if (clk_ready < 0) {
			dev_err(desc->dev,
				"[%s]: Error getting proxy unvoting gpio\n",
				desc->name);
			return;
		}

		clk_ready = gpio_to_irq(clk_ready);
		if (clk_ready < 0) {
			dev_err(desc->dev,
				"[%s]: Error getting proxy unvote IRQ\n",
				desc->name);
			return;
		}
	}
	desc->proxy_unvote_irq = clk_ready;
}

/* Synchronize request_firmware() with suspend */
static DECLARE_RWSEM(pil_pm_rwsem);

/**
 * pil_boot() - Load a peripheral image into memory and boot it
 * @desc: descriptor from pil_desc_init()
 *
 * Returns 0 on success or -ERROR on failure.
 */
int pil_boot(struct pil_desc *desc)
{
	int ret;
	char fw_name[30];
	const struct pil_mdt *mdt;
	const struct elf32_hdr *ehdr;
	struct pil_seg *seg;
	const struct firmware *fw;
	struct pil_priv *priv = desc->priv;

	/* Reinitialize for new image */
	pil_release_mmap(desc);

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->name);
	ret = request_firmware(&fw, fw_name, desc->dev);
	if (ret) {
		pil_err(desc, "Failed to locate %s\n", fw_name);
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		pil_err(desc, "Not big enough to be an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	mdt = (const struct pil_mdt *)fw->data;
	ehdr = &mdt->hdr;

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pil_err(desc, "Not an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		pil_err(desc, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		pil_err(desc, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil_init_mmap(desc, mdt);
	if (ret)
		goto release_fw;

	if (desc->ops->init_image)
		ret = desc->ops->init_image(desc, fw->data, fw->size);
	if (ret) {
		pil_err(desc, "Invalid firmware metadata\n");
		goto release_fw;
	}

	if (desc->ops->mem_setup)
		ret = desc->ops->mem_setup(desc, priv->region_start,
				priv->region_end - priv->region_start);
	if (ret) {
		pil_err(desc, "Memory setup error\n");
		goto release_fw;
	}

	list_for_each_entry(seg, &priv->segs, list) {
		ret = pil_load_seg(desc, seg);
		if (ret)
			goto release_fw;
	}

	priv->unvoted_flag = 0;
	ret = pil_proxy_vote(desc);
	if (ret) {
		pil_err(desc, "Failed to proxy vote\n");
		goto release_fw;
	}

	ret = desc->ops->auth_and_reset(desc);
	if (ret) {
		pil_err(desc, "Failed to bring out of reset\n");
		goto err_boot;
	}
	pil_info(desc, "Brought out of reset\n");
err_boot:
	pil_proxy_unvote(desc, ret);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	if (ret) {
		if (priv->region) {
			ion_free(ion, priv->region);
			priv->region = NULL;
		}
		pil_release_mmap(desc);
	}
	return ret;
}
EXPORT_SYMBOL(pil_boot);

/**
 * pil_shutdown() - Shutdown a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_shutdown(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (desc->ops->shutdown)
		desc->ops->shutdown(desc);

	if (desc->proxy_unvote_irq) {
		disable_irq(desc->proxy_unvote_irq);
		if (!priv->unvoted_flag)
			pil_proxy_unvote(desc, 1);
		return;
	}

	if (!proxy_timeout_ms)
		pil_proxy_unvote(desc, 1);
	else
		flush_delayed_work(&priv->proxy);
}
EXPORT_SYMBOL(pil_shutdown);

static DEFINE_IDA(pil_ida);

/**
 * pil_desc_init() - Initialize a pil descriptor
 * @desc: descriptor to initialize
 *
 * Initialize a pil descriptor for use by other pil functions. This function
 * must be called before calling pil_boot() or pil_shutdown().
 *
 * Returns 0 for success and -ERROR on failure.
 */
int pil_desc_init(struct pil_desc *desc)
{
	struct pil_priv *priv;
	int ret;
	void __iomem *addr;
	char buf[sizeof(priv->info->name)];

	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
				"Invalid proxy voting. Ignoring\n"))
		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	desc->priv = priv;
	priv->desc = desc;

	priv->id = ret = ida_simple_get(&pil_ida, 0, 10, GFP_KERNEL);
	if (priv->id < 0)
		goto err;

	addr = PIL_IMAGE_INFO_BASE + sizeof(struct pil_image_info) * priv->id;
	priv->info = (struct pil_image_info __iomem *)addr;

	strncpy(buf, desc->name, sizeof(buf));
	__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);

	pil_parse_devicetree(desc);

	/* Ignore users who don't make any sense */
	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
		&& !desc->proxy_timeout,
		"Proxy unvote callback provided without a proxy unvote IRQ or a non-zero proxy timeout\n");

	if (desc->proxy_unvote_irq) {
		ret = request_threaded_irq(desc->proxy_unvote_irq,
					   NULL,
					   proxy_unvote_intr_handler,
					   IRQF_TRIGGER_RISING,
					   desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev,
				"Unable to request proxy unvote IRQ: %d\n",
				ret);
			goto err;
		}
		disable_irq(desc->proxy_unvote_irq);
	}

	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
	wake_lock_init(&priv->wlock, WAKE_LOCK_SUSPEND, priv->wname);
	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
	INIT_LIST_HEAD(&priv->segs);

	return 0;
err:
	if (priv->id >= 0)
		ida_simple_remove(&pil_ida, priv->id);
	desc->priv = NULL;
	kfree(priv);
	return ret;
}
EXPORT_SYMBOL(pil_desc_init);

/**
 * pil_desc_release() - Release a pil descriptor
 * @desc: descriptor to free
 */
void pil_desc_release(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv) {
		ida_simple_remove(&pil_ida, priv->id);
		flush_delayed_work(&priv->proxy);
		wake_lock_destroy(&priv->wlock);
	}
	desc->priv = NULL;
	kfree(priv);
}
EXPORT_SYMBOL(pil_desc_release);
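
/*
 * A minimal sketch of the expected call sequence from a PIL client driver
 * (the ops structure and field values below are illustrative placeholders,
 * not definitions from this file):
 *
 *	static struct pil_reset_ops my_pil_ops = { ... };
 *
 *	desc->name = "widget";
 *	desc->dev = &pdev->dev;
 *	desc->ops = &my_pil_ops;
 *	desc->owner = THIS_MODULE;
 *	ret = pil_desc_init(desc);
 *	...
 *	ret = pil_boot(desc);		// load <name>.mdt/.bNN and start it
 *	...
 *	pil_shutdown(desc);		// stop it and drop proxy votes
 *	pil_desc_release(desc);		// on driver removal
 */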

static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&pil_pm_rwsem);
		break;
	case PM_POST_SUSPEND:
		up_write(&pil_pm_rwsem);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block pil_pm_notifier = {
	.notifier_call = pil_pm_notify,
};

static int __init msm_pil_init(void)
{
	ion = msm_ion_client_create(UINT_MAX, "pil");
	if (IS_ERR(ion)) /* Can't support relocatable images */
		ion = NULL;
	return register_pm_notifier(&pil_pm_notifier);
}
device_initcall(msm_pil_init);

static void __exit msm_pil_exit(void)
{
	unregister_pm_notifier(&pil_pm_notifier);
	if (ion)
		ion_client_destroy(ion);
}
module_exit(msm_pil_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");