/* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/elf.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/ramdump.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/smem.h>
#include <linux/kthread.h>

#include <linux/uaccess.h>
#include <asm/setup.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_pil_event.h>

#include "peripheral-loader.h"
#include <soc/qcom/boot_stats.h>

#define pil_err(desc, fmt, ...) \
	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
#define pil_info(desc, fmt, ...) \
	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)

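/*
 * pil_memset_io(): clear firmware memory through the right accessor.
 * On 32-bit ARM a plain memset() is used on the remapped region;
 * elsewhere memset_io() is used, the safe choice for __iomem mappings.
 */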
#if defined(CONFIG_ARM)
#define pil_memset_io(d, c, count) memset(d, c, count)
#else
#define pil_memset_io(d, c, count) memset_io(d, c, count)
#endif

#define PIL_NUM_DESC	10
#define MAX_LEN 96
static void __iomem *pil_info_base;
static struct md_global_toc *g_md_toc;

/**
 * proxy_timeout_ms - Override for proxy vote timeouts
 * -1: Use driver-specified timeout
 *  0: Hold proxy votes until shutdown
 * >0: Specify a custom timeout in ms
 */
static int proxy_timeout_ms = -1;
module_param(proxy_timeout_ms, int, 0644);

static bool disable_timeouts;

static struct workqueue_struct *pil_wq;

/**
 * struct pil_mdt - Representation of <name>.mdt file in memory
 * @hdr: ELF32 header
 * @phdr: ELF32 program headers
 */
struct pil_mdt {
	struct elf32_hdr hdr;
	struct elf32_phdr phdr[];
};

/**
 * struct pil_seg - memory map representing one segment
 * @list: links this segment into the descriptor's list of segments
 * @paddr: physical start address of segment
 * @sz: size of segment
 * @filesz: size of segment on disk
 * @num: segment number
 * @relocated: true if segment is relocated, false otherwise
 *
 * Loosely based on an ELF program header. Contains all necessary information
 * to load and initialize a segment of the image in memory.
 */
struct pil_seg {
	phys_addr_t paddr;
	unsigned long sz;
	unsigned long filesz;
	int num;
	struct list_head list;
	bool relocated;
};

/**
 * struct pil_priv - Private state for a pil_desc
 * @proxy: work item used to run the proxy unvoting routine
 * @ws: wakeup source to prevent suspend during pil_boot
 * @wname: name of @ws
 * @desc: pointer to pil_desc this is private data for
 * @num_segs: number of segments on @segs
 * @segs: list of segments sorted by physical address
 * @entry_addr: physical address where processor starts booting at
 * @base_addr: smallest start address among all segments that are relocatable
 * @region_start: address where relocatable region starts or lowest address
 * for non-relocatable images
 * @region_end: address where relocatable region ends or highest address for
 * non-relocatable images
 * @region: region allocated for relocatable images
 * @unvoted_flag: flag to keep track of whether we have unvoted or not
 *
 * This struct contains data for a pil_desc that should not be exposed outside
 * of this file. This structure points to the descriptor and the descriptor
 * points to this structure so that PIL drivers can't access the private
 * data of a descriptor but this file can access both.
 */
struct pil_priv {
	struct delayed_work proxy;
	struct wakeup_source ws;
	char wname[32];
	struct pil_desc *desc;
	int num_segs;
	struct list_head segs;
	phys_addr_t entry_addr;
	phys_addr_t base_addr;
	phys_addr_t region_start;
	phys_addr_t region_end;
	void *region;
	struct pil_image_info __iomem *info;
	int id;
	int unvoted_flag;
	size_t region_size;
};

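/*
 * Walk the subsystem (SS) and PDR minidump region tables published in SMEM,
 * collect the entries marked valid into a ramdump segment list, and hand the
 * result to do_minidump().
 */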
static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
{
	struct md_ss_region __iomem *region_info_ss;
	struct md_ss_region __iomem *region_info_pdr;
	struct ramdump_segment *ramdump_segs, *s;
	struct pil_priv *priv = desc->priv;
	void __iomem *subsys_segtable_base_ss;
	void __iomem *subsys_segtable_base_pdr;
	u64 ss_region_ptr = 0;
	void __iomem *offset_ss;
	void __iomem *offset_pdr;
	int ss_mdump_seg_cnt_ss = 0, ss_mdump_seg_cnt_pdr = 0, total_segs;
	int ss_valid_seg_cnt;
	int ret, i;

	if (!ramdump_dev)
		return -ENODEV;

	ss_region_ptr = desc->minidump_ss->md_ss_smem_regions_baseptr;
	ss_mdump_seg_cnt_ss = desc->minidump_ss->ss_region_count;
	subsys_segtable_base_ss =
		ioremap((unsigned long)ss_region_ptr,
			ss_mdump_seg_cnt_ss * sizeof(struct md_ss_region));
	region_info_ss =
		(struct md_ss_region __iomem *)subsys_segtable_base_ss;
	if (!region_info_ss)
		return -EINVAL;
	pr_info("Minidump : SS Segments in minidump 0x%x\n",
		ss_mdump_seg_cnt_ss);

	if (desc->minidump_pdr &&
	    (desc->minidump_pdr->md_ss_enable_status == MD_SS_ENABLED)) {
		ss_region_ptr = desc->minidump_pdr->md_ss_smem_regions_baseptr;
		ss_mdump_seg_cnt_pdr = desc->minidump_pdr->ss_region_count;
		subsys_segtable_base_pdr =
			ioremap((unsigned long)ss_region_ptr,
				ss_mdump_seg_cnt_pdr *
				sizeof(struct md_ss_region));
		region_info_pdr =
			(struct md_ss_region __iomem *)subsys_segtable_base_pdr;
		if (!region_info_pdr)
			return -EINVAL;
		pr_info("Minidump : PDR Segments in minidump 0x%x\n",
			ss_mdump_seg_cnt_pdr);
	}
	total_segs = ss_mdump_seg_cnt_ss + ss_mdump_seg_cnt_pdr;
	ramdump_segs = kcalloc(total_segs,
			       sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	s = ramdump_segs;
	ss_valid_seg_cnt = total_segs;
	for (i = 0; i < ss_mdump_seg_cnt_ss; i++) {
		memcpy(&offset_ss, &region_info_ss, sizeof(region_info_ss));
		offset_ss = offset_ss + sizeof(region_info_ss->name) +
				sizeof(region_info_ss->seq_num);
		if (__raw_readl(offset_ss) == MD_REGION_VALID) {
			memcpy(&s->name, &region_info_ss,
			       sizeof(region_info_ss));
			offset_ss = offset_ss +
				sizeof(region_info_ss->md_valid);
			s->address = __raw_readl(offset_ss);
			offset_ss = offset_ss +
				sizeof(region_info_ss->region_base_address);
			s->size = __raw_readl(offset_ss);
			pr_info("Minidump : Dumping segment %s with address 0x%lx and size 0x%x\n",
				s->name, s->address, (unsigned int)s->size);
		} else
			ss_valid_seg_cnt--;
		s++;
		region_info_ss++;
	}

	for (i = 0; i < ss_mdump_seg_cnt_pdr; i++) {
		memcpy(&offset_pdr, &region_info_pdr, sizeof(region_info_pdr));
		offset_pdr = offset_pdr + sizeof(region_info_pdr->name) +
				sizeof(region_info_pdr->seq_num);
		if (__raw_readl(offset_pdr) == MD_REGION_VALID) {
			memcpy(&s->name, &region_info_pdr,
			       sizeof(region_info_pdr));
			offset_pdr = offset_pdr +
				sizeof(region_info_pdr->md_valid);
			s->address = __raw_readl(offset_pdr);
			offset_pdr = offset_pdr +
				sizeof(region_info_pdr->region_base_address);
			s->size = __raw_readl(offset_pdr);
			pr_info("Minidump : Dumping segment %s with address 0x%lx and size 0x%x\n",
				s->name, s->address, (unsigned int)s->size);
		} else
			ss_valid_seg_cnt--;
		s++;
		region_info_pdr++;
	}
	ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
	kfree(ramdump_segs);
	if (ret)
		pil_err(desc, "%s: Minidump collection failed for subsys %s rc:%d\n",
			__func__, desc->name, ret);

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
				(priv->region_end - priv->region_start));
	return ret;
}

/**
 * pil_do_ramdump() - Ramdump an image
 * @desc: descriptor from pil_desc_init()
 * @ramdump_dev: ramdump device returned from create_ramdump_device()
 * @minidump_dev: device used when a minidump is collected instead
 *
 * Calls the ramdump API with a list of segments generated from the addresses
 * that the descriptor corresponds to.
 */
int pil_do_ramdump(struct pil_desc *desc,
		   void *ramdump_dev, void *minidump_dev)
{
	struct ramdump_segment *ramdump_segs, *s;
	struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;
	int count = 0, ret;
	u32 encryption_status = 0;

	if (desc->minidump_ss) {
		pr_info("Minidump : md_ss_toc->md_ss_toc_init is 0x%x\n",
			(unsigned int)desc->minidump_ss->md_ss_toc_init);
		pr_info("Minidump : md_ss_toc->md_ss_enable_status is 0x%x\n",
			(unsigned int)desc->minidump_ss->md_ss_enable_status);
		pr_info("Minidump : md_ss_toc->encryption_status is 0x%x\n",
			(unsigned int)desc->minidump_ss->encryption_status);
		pr_info("Minidump : md_ss_toc->ss_region_count is 0x%x\n",
			(unsigned int)desc->minidump_ss->ss_region_count);
		pr_info("Minidump : md_ss_toc->md_ss_smem_regions_baseptr is 0x%x\n",
			(unsigned int)
			desc->minidump_ss->md_ss_smem_regions_baseptr);
		/*
		 * Collect minidump if the SS ToC is valid, the segment table
		 * is initialized in memory and the encryption status is set.
		 */
		encryption_status = desc->minidump_ss->encryption_status;

		if ((desc->minidump_ss->md_ss_smem_regions_baseptr != 0) &&
		    (desc->minidump_ss->md_ss_toc_init == true) &&
		    (desc->minidump_ss->md_ss_enable_status ==
		     MD_SS_ENABLED)) {
			if (encryption_status == MD_SS_ENCR_DONE ||
			    encryption_status == MD_SS_ENCR_NOTREQ) {
				pr_info("Minidump : Dumping for %s\n",
					desc->name);
				return pil_do_minidump(desc, minidump_dev);
			}
			pr_info("Minidump : aborted for %s\n", desc->name);
			return -EINVAL;
		}
	}
	pr_debug("Continuing with full SSR dump for %s\n", desc->name);
	list_for_each_entry(seg, &priv->segs, list)
		count++;

	ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
	if (!ramdump_segs)
		return -ENOMEM;

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	s = ramdump_segs;
	list_for_each_entry(seg, &priv->segs, list) {
		s->address = seg->paddr;
		s->size = seg->sz;
		s++;
	}

	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
	kfree(ramdump_segs);

	if (ret)
		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
			__func__, desc->name, ret);

	if (desc->subsys_vmid > 0)
		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
				(priv->region_end - priv->region_start));

	return ret;
}
EXPORT_SYMBOL(pil_do_ramdump);

int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
			     size_t size)
{
	int ret;
	int srcVM[1] = {VMID_HLOS};
	int destVM[1] = {desc->subsys_vmid};
	int destVMperm[1] = {PERM_READ | PERM_WRITE};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
	if (ret)
		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
			__func__, &addr, size, desc->subsys_vmid, ret);
	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys);

int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
			    size_t size)
{
	int ret;
	int srcVM[1] = {desc->subsys_vmid};
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
	if (ret)
		panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
		      __func__, &addr, size, desc->subsys_vmid, ret);

	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_linux);

int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
				       phys_addr_t addr, size_t size)
{
	int ret;
	int srcVM[1] = {VMID_HLOS};
	int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};

	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
	if (ret)
		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
			__func__, &addr, size, desc->subsys_vmid, ret);

	return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);

int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
		    int VMid)
{
	int ret;
	int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
	int destVM[1] = {VMid};
	int destVMperm[1] = {PERM_READ | PERM_WRITE};

	if (VMid == VMID_HLOS)
		destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;

	ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
	if (ret)
		panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
		      __func__, &addr, size, desc->subsys_vmid);

	return ret;
}
EXPORT_SYMBOL(pil_reclaim_mem);

/**
 * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
 * @desc: descriptor from pil_desc_init()
 *
 * Returns the physical address where the image boots at or 0 if unknown.
 */
phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
	return desc->priv ? desc->priv->entry_addr : 0;
}
EXPORT_SYMBOL(pil_get_entry_addr);

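/*
 * Proxy votes keep clocks, regulators and other shared resources up on
 * behalf of a peripheral while it boots. The vote is dropped from delayed
 * work, from the "power/clock ready" interrupt, or immediately on error
 * or shutdown.
 */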
static void __pil_proxy_unvote(struct pil_priv *priv)
{
	struct pil_desc *desc = priv->desc;

	desc->ops->proxy_unvote(desc);
	notify_proxy_unvote(desc->dev);
	__pm_relax(&priv->ws);
	module_put(desc->owner);
}

static void pil_proxy_unvote_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);

	__pil_proxy_unvote(priv);
}

static int pil_proxy_vote(struct pil_desc *desc)
{
	int ret = 0;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->proxy_vote) {
		__pm_stay_awake(&priv->ws);
		ret = desc->ops->proxy_vote(desc);
		if (ret)
			__pm_relax(&priv->ws);
	}

	if (desc->proxy_unvote_irq)
		enable_irq(desc->proxy_unvote_irq);
	notify_proxy_vote(desc->dev);

	return ret;
}

static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
{
	struct pil_priv *priv = desc->priv;
	unsigned long timeout;

	if (proxy_timeout_ms == 0 && !immediate)
		return;
	else if (proxy_timeout_ms > 0)
		timeout = proxy_timeout_ms;
	else
		timeout = desc->proxy_timeout;

	if (desc->ops->proxy_unvote) {
		if (WARN_ON(!try_module_get(desc->owner)))
			return;

		if (immediate)
			timeout = 0;

		if (!desc->proxy_unvote_irq || immediate)
			schedule_delayed_work(&priv->proxy,
					      msecs_to_jiffies(timeout));
	}
}

static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
{
	struct pil_desc *desc = dev_id;
	struct pil_priv *priv = desc->priv;

	pil_info(desc, "Power/Clock ready interrupt received\n");
	if (!desc->priv->unvoted_flag) {
		desc->priv->unvoted_flag = 1;
		__pil_proxy_unvote(priv);
	}

	return IRQ_HANDLED;
}

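/* Bit 27 of p_flags is a Qualcomm-specific flag marking a relocatable segment. */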
static bool segment_is_relocatable(const struct elf32_phdr *p)
{
	return !!(p->p_flags & BIT(27));
}

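/*
 * Translate a physical address from the image's link-time layout into the
 * relocated region that was actually allocated at runtime.
 */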
static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
{
	return addr - priv->base_addr + priv->region_start;
}

static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
				    const struct elf32_phdr *phdr, int num)
{
	bool reloc = segment_is_relocatable(phdr);
	const struct pil_priv *priv = desc->priv;
	struct pil_seg *seg;

	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
		pil_err(desc, "Segment not relocatable, kernel memory would be overwritten [%#08lx, %#08lx)\n",
			(unsigned long)phdr->p_paddr,
			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
		return ERR_PTR(-EPERM);
	}

	if (phdr->p_filesz > phdr->p_memsz) {
		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
			num, phdr->p_filesz, phdr->p_memsz);
		return ERR_PTR(-EINVAL);
	}

	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return ERR_PTR(-ENOMEM);
	seg->num = num;
	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
	seg->filesz = phdr->p_filesz;
	seg->sz = phdr->p_memsz;
	seg->relocated = reloc;
	INIT_LIST_HEAD(&seg->list);

	return seg;
}

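/*
 * Bits [26:24] of p_flags hold a Qualcomm-specific segment type; type 0x2
 * marks a hash segment, which carries image hash/signature data used for
 * authentication rather than loadable code, so it is skipped when loading.
 */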
#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))

static int segment_is_loadable(const struct elf32_phdr *p)
{
	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
		p->p_memsz;
}

static void pil_dump_segs(const struct pil_priv *priv)
{
	struct pil_seg *seg;
	phys_addr_t seg_h_paddr;

	list_for_each_entry(seg, &priv->segs, list) {
		seg_h_paddr = seg->paddr + seg->sz;
		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
			 &seg->paddr, &seg_h_paddr);
	}
}

/*
 * Ensure the entry address lies within the image limits and, if the image is
 * relocatable, ensure it lies within a relocatable segment.
 */
static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
{
	struct pil_seg *seg;
	phys_addr_t entry = mdt->hdr.e_entry;
	bool image_relocated = priv->region;

	if (image_relocated)
		entry = pil_reloc(priv, entry);
	priv->entry_addr = entry;

	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
		return 0;

	list_for_each_entry(seg, &priv->segs, list) {
		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
			if (!image_relocated)
				return 0;
			else if (seg->relocated)
				return 0;
		}
	}
	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
	pil_dump_segs(priv);
	return -EADDRNOTAVAIL;
}

static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
			    phys_addr_t max_addr, size_t align,
			    size_t mdt_size)
{
	void *region;
	size_t size = max_addr - min_addr;
	size_t aligned_size = max(size, mdt_size);

	/* Don't reallocate due to fragmentation concerns, just sanity check */
	if (priv->region) {
		if (WARN(priv->region_end - priv->region_start < size,
			 "Can't reuse PIL memory, too small\n"))
			return -ENOMEM;
		return 0;
	}

	if (align > SZ_4M)
		aligned_size = ALIGN(aligned_size, SZ_4M);
	else if (align > SZ_1M)
		aligned_size = ALIGN(aligned_size, SZ_1M);
	else
		aligned_size = ALIGN(aligned_size, SZ_4K);

	priv->desc->attrs = 0;
	priv->desc->attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;

	region = dma_alloc_attrs(priv->desc->dev, aligned_size,
				 &priv->region_start, GFP_KERNEL,
				 priv->desc->attrs);

	if (region == NULL) {
		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
			size);
		priv->region_start = 0;
		priv->region_end = 0;
		return -ENOMEM;
	}

	priv->region = region;
	priv->region_end = priv->region_start + size;
	priv->base_addr = min_addr;
	priv->region_size = aligned_size;

	return 0;
}

static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt,
			    size_t mdt_size)
{
	const struct elf32_phdr *phdr;
	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
	size_t align = 0;
	int i, ret = 0;
	bool relocatable = false;

	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
	max_addr_n = max_addr_r = 0;

	/* Find the image limits */
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		start = phdr->p_paddr;
		end = start + phdr->p_memsz;

		if (segment_is_relocatable(phdr)) {
			min_addr_r = min(min_addr_r, start);
			max_addr_r = max(max_addr_r, end);
			/*
			 * Lowest relocatable segment dictates alignment of
			 * relocatable region
			 */
			if (min_addr_r == start)
				align = phdr->p_align;
			relocatable = true;
		} else {
			min_addr_n = min(min_addr_n, start);
			max_addr_n = max(max_addr_n, end);
		}
	}

	/*
	 * Align the max address to the next 4K boundary to satisfy iommus and
	 * XPUs that operate on 4K chunks.
	 */
	max_addr_n = ALIGN(max_addr_n, SZ_4K);
	max_addr_r = ALIGN(max_addr_r, SZ_4K);

	if (relocatable) {
		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align,
				       mdt_size);
	} else {
		priv->region_start = min_addr_n;
		priv->region_end = max_addr_n;
		priv->base_addr = min_addr_n;
	}

	if (priv->info) {
		__iowrite32_copy(&priv->info->start, &priv->region_start,
				 sizeof(priv->region_start) / 4);
		writel_relaxed(priv->region_end - priv->region_start,
			       &priv->info->size);
	}

	return ret;
}

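/* list_sort() comparator: order segments by ascending physical address. */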
static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
{
	int ret = 0;
	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);

	if (seg_a->paddr < seg_b->paddr)
		ret = -1;
	else if (seg_a->paddr > seg_b->paddr)
		ret = 1;

	return ret;
}

static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt,
			 size_t mdt_size)
{
	struct pil_priv *priv = desc->priv;
	const struct elf32_phdr *phdr;
	struct pil_seg *seg;
	int i, ret;

	ret = pil_setup_region(priv, mdt, mdt_size);
	if (ret)
		return ret;

	place_marker("M - Modem Image Start Loading");
	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
		 &priv->region_end);

	priv->num_segs = 0;
	for (i = 0; i < mdt->hdr.e_phnum; i++) {
		phdr = &mdt->phdr[i];
		if (!segment_is_loadable(phdr))
			continue;

		seg = pil_init_seg(desc, phdr, i);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		list_add_tail(&seg->list, &priv->segs);
		priv->num_segs++;
	}
	list_sort(NULL, &priv->segs, pil_cmp_seg);

	return pil_init_entry_addr(priv, mdt);
}

struct pil_map_fw_info {
	void *region;
	unsigned long attrs;
	phys_addr_t base_addr;
	struct device *dev;
};

static void pil_release_mmap(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;
	struct pil_seg *p, *tmp;
	u64 zero = 0ULL;

	if (priv->info) {
		__iowrite32_copy(&priv->info->start, &zero,
				 sizeof(zero) / 4);
		writel_relaxed(0, &priv->info->size);
	}

	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
		list_del(&p->list);
		kfree(p);
	}
}

static void pil_clear_segment(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;
	u8 __iomem *buf;

	struct pil_map_fw_info map_fw_info = {
		.attrs = desc->attrs,
		.region = priv->region,
		.base_addr = priv->region_start,
		.dev = desc->dev,
	};

	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;

	/* Clear memory so that unauthorized ELF code is not left behind */
	buf = desc->map_fw_mem(priv->region_start, (priv->region_end -
					priv->region_start), map_data);

	if (!buf) {
		pil_err(desc, "Failed to map memory\n");
		return;
	}

	pil_memset_io(buf, 0, (priv->region_end - priv->region_start));
	desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
			   map_data);
}

#define IOMAP_SIZE SZ_1M

static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
{
	struct pil_map_fw_info *info = data;

	return dma_remap(info->dev, info->region, paddr, size,
			 info->attrs);
}

static void unmap_fw_mem(void *vaddr, size_t size, void *data)
{
	struct pil_map_fw_info *info = data;

	dma_unremap(info->dev, vaddr, size);
}

static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
{
	int ret = 0, count;
	phys_addr_t paddr;
	char fw_name[30];
	int num = seg->num;
	const struct firmware *fw = NULL;
	void __iomem *firmware_buf;
	struct pil_map_fw_info map_fw_info = {
		.attrs = desc->attrs,
		.region = desc->priv->region,
		.base_addr = desc->priv->region_start,
		.dev = desc->dev,
	};
	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;

	if (seg->filesz) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
			 desc->fw_name, num);
		firmware_buf = desc->map_fw_mem(seg->paddr, seg->filesz,
						map_data);
		if (!firmware_buf) {
			pil_err(desc, "Failed to map memory for firmware buffer\n");
			return -ENOMEM;
		}

		ret = request_firmware_into_buf(&fw, fw_name, desc->dev,
						firmware_buf, seg->filesz);
		desc->unmap_fw_mem(firmware_buf, seg->filesz, map_data);

		if (ret) {
			pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
				fw_name, ret);
			return ret;
		}

		if (fw->size != seg->filesz) {
			pil_err(desc, "Blob size %zu doesn't match %lu\n",
				fw->size, seg->filesz);
			release_firmware(fw);
			return -EPERM;
		}

		release_firmware(fw);
	}

	/* Zero out trailing memory */
	paddr = seg->paddr + seg->filesz;
	count = seg->sz - seg->filesz;
	while (count > 0) {
		int size;
		u8 __iomem *buf;

		size = min_t(size_t, IOMAP_SIZE, count);
		buf = desc->map_fw_mem(paddr, size, map_data);
		if (!buf) {
			pil_err(desc, "Failed to map memory\n");
			return -ENOMEM;
		}
		pil_memset_io(buf, 0, size);

		desc->unmap_fw_mem(buf, size, map_data);

		count -= size;
		paddr += size;
	}

	if (desc->ops->verify_blob) {
		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
		if (ret)
			pil_err(desc, "Blob%u failed verification(rc:%d)\n",
				num, ret);
	}

	return ret;
}

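/*
 * Optional devicetree properties consumed here (the values shown are
 * illustrative only, not taken from a real board file):
 *
 *	qcom,mem-protect-id = <0xF>;
 *	qcom,gpio-proxy-unvote = <&pm8998_gpios 5 0>;
 *	qcom,sequential-fw-load;
 */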
static int pil_parse_devicetree(struct pil_desc *desc)
{
	struct device_node *ofnode = desc->dev->of_node;
	int clk_ready = 0;

	if (!ofnode)
		return -EINVAL;

	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
				 &desc->subsys_vmid))
		pr_debug("Unable to read the mem-protect-id for %s\n",
			 desc->name);

	if (desc->ops->proxy_unvote && of_find_property(ofnode,
					"qcom,gpio-proxy-unvote",
					NULL)) {
		clk_ready = of_get_named_gpio(ofnode,
				"qcom,gpio-proxy-unvote", 0);

		if (clk_ready < 0) {
			dev_dbg(desc->dev,
				"[%s]: Error getting proxy unvoting gpio\n",
				desc->name);
			return clk_ready;
		}

		clk_ready = gpio_to_irq(clk_ready);
		if (clk_ready < 0) {
			dev_err(desc->dev,
				"[%s]: Error getting proxy unvote IRQ\n",
				desc->name);
			return clk_ready;
		}
	}
	desc->proxy_unvote_irq = clk_ready;

	desc->sequential_load = of_property_read_bool(ofnode,
					"qcom,sequential-fw-load");
	return 0;
}

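/*
 * Report image load state to AOP over the QMP mailbox; for example, booting
 * the modem produces "{class: image, res: load_state, name: modem, val: on}".
 */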
static int pil_notify_aop(struct pil_desc *desc, char *status)
{
	struct qmp_pkt pkt;
	char mbox_msg[MAX_LEN];

	if (!desc->signal_aop)
		return 0;

	snprintf(mbox_msg, MAX_LEN,
		 "{class: image, res: load_state, name: %s, val: %s}",
		 desc->name, status);
	pkt.size = MAX_LEN;
	pkt.data = mbox_msg;

	return mbox_send_message(desc->mbox, &pkt);
}

/* Synchronize request_firmware() with suspend */
static DECLARE_RWSEM(pil_pm_rwsem);

struct pil_seg_data {
	struct pil_desc *desc;
	struct pil_seg *seg;
	struct work_struct load_seg_work;
	int retval;
};

static void pil_load_seg_work_fn(struct work_struct *work)
{
	struct pil_seg_data *pil_seg_data = container_of(work,
							 struct pil_seg_data,
							 load_seg_work);
	struct pil_desc *desc = pil_seg_data->desc;
	struct pil_seg *seg = pil_seg_data->seg;

	pil_seg_data->retval = pil_load_seg(desc, seg);
}

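/*
 * Load every segment in parallel on the high-priority pil_wq workqueue,
 * recording per-segment failures in a bitmap and collapsing them into a
 * single error code once all workers have finished.
 */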
static int pil_load_segs(struct pil_desc *desc)
{
	int ret = 0;
	int seg_id = 0;
	struct pil_priv *priv = desc->priv;
	struct pil_seg_data *pil_seg_data;
	struct pil_seg *seg;
	unsigned long *err_map;

	err_map = kcalloc(BITS_TO_LONGS(priv->num_segs), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!err_map)
		return -ENOMEM;

	pil_seg_data = kcalloc(priv->num_segs, sizeof(*pil_seg_data),
			       GFP_KERNEL);
	if (!pil_seg_data) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize and spawn a work item for each segment */
	list_for_each_entry(seg, &desc->priv->segs, list) {
		pil_seg_data[seg_id].desc = desc;
		pil_seg_data[seg_id].seg = seg;

		INIT_WORK(&pil_seg_data[seg_id].load_seg_work,
			  pil_load_seg_work_fn);
		queue_work(pil_wq, &pil_seg_data[seg_id].load_seg_work);

		seg_id++;
	}

	bitmap_zero(err_map, priv->num_segs);

	/* Wait for the parallel loads to finish */
	seg_id = 0;
	list_for_each_entry(seg, &desc->priv->segs, list) {
		flush_work(&pil_seg_data[seg_id].load_seg_work);

		/*
		 * Don't exit if one of the loads fails. Wait for the others
		 * to complete. Bitmap the return codes we get from the
		 * workers.
		 */
		if (pil_seg_data[seg_id].retval) {
			pil_err(desc,
				"Failed to load the segment[%d]. ret = %d\n",
				seg_id, pil_seg_data[seg_id].retval);
			__set_bit(seg_id, err_map);
		}

		seg_id++;
	}

	kfree(pil_seg_data);

	/* Each segment can fail for a different reason. Send a generic err */
	if (!bitmap_empty(err_map, priv->num_segs))
		ret = -EFAULT;

out:
	kfree(err_map);
	return ret;
}

/**
 * pil_boot() - Load a peripheral image into memory and boot it
 * @desc: descriptor from pil_desc_init()
 *
 * Returns 0 on success or -ERROR on failure.
 */
int pil_boot(struct pil_desc *desc)
{
	int ret;
	char fw_name[30];
	struct pil_seg *seg;
	const struct pil_mdt *mdt;
	const struct elf32_hdr *ehdr;
	const struct firmware *fw;
	struct pil_priv *priv = desc->priv;
	bool mem_protect = false;
	bool hyp_assign = false;

	ret = pil_notify_aop(desc, "on");
	if (ret < 0) {
		pil_err(desc, "Failed to send ON message to AOP rc:%d\n", ret);
		return ret;
	}

	if (desc->shutdown_fail)
		pil_err(desc, "Subsystem shutdown failed previously!\n");

	/* Reinitialize for new image */
	pil_release_mmap(desc);

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
	ret = request_firmware(&fw, fw_name, desc->dev);
	if (ret) {
		pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		pil_err(desc, "Not big enough to be an ELF header\n");
		ret = -EIO;
		goto release_fw;
	}

	mdt = (const struct pil_mdt *)fw->data;
	ehdr = &mdt->hdr;

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		pil_err(desc, "Not an ELF header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		pil_err(desc, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		pil_err(desc, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil_init_mmap(desc, mdt, fw->size);
	if (ret)
		goto release_fw;

	desc->priv->unvoted_flag = 0;
	ret = pil_proxy_vote(desc);
	if (ret) {
		pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
		goto release_fw;
	}

	trace_pil_event("before_init_image", desc);
	if (desc->ops->init_image)
		ret = desc->ops->init_image(desc, fw->data, fw->size,
					    priv->region_start, priv->region);
	if (ret) {
		pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
		goto err_boot;
	}

	trace_pil_event("before_mem_setup", desc);
	if (desc->ops->mem_setup)
		ret = desc->ops->mem_setup(desc, priv->region_start,
				priv->region_end - priv->region_start);
	if (ret) {
		pil_err(desc, "Memory setup error(rc:%d)\n", ret);
		goto err_deinit_image;
	}

	if (desc->subsys_vmid > 0) {
		/*
		 * In case of modem SSR, we need to assign memory back to
		 * linux. This is not true after cold boot since linux already
		 * owns it. Also for secure boot devices, modem memory has to
		 * be released after MBA is booted.
		 */
		trace_pil_event("before_assign_mem", desc);
		if (desc->modem_ssr) {
			ret = pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
			if (ret)
				pil_err(desc, "Failed to assign to linux, ret- %d\n",
					ret);
		}
		ret = pil_assign_mem_to_subsys_and_linux(desc,
				priv->region_start,
				(priv->region_end - priv->region_start));
		if (ret) {
			pil_err(desc, "Failed to assign memory, ret - %d\n",
				ret);
			goto err_deinit_image;
		}
		hyp_assign = true;
	}

	trace_pil_event("before_load_seg", desc);

	if (desc->sequential_load) {
		list_for_each_entry(seg, &desc->priv->segs, list) {
			ret = pil_load_seg(desc, seg);
			if (ret)
				goto err_deinit_image;
		}
	} else {
		ret = pil_load_segs(desc);
		if (ret)
			goto err_deinit_image;
	}

	if (desc->subsys_vmid > 0) {
		trace_pil_event("before_reclaim_mem", desc);
		ret = pil_reclaim_mem(desc, priv->region_start,
				(priv->region_end - priv->region_start),
				desc->subsys_vmid);
		if (ret) {
			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
				desc->name, ret);
			goto err_deinit_image;
		}
		hyp_assign = false;
	}

	trace_pil_event("before_auth_reset", desc);
	ret = desc->ops->auth_and_reset(desc);
	if (ret) {
		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
		goto err_auth_and_reset;
	}
	trace_pil_event("reset_done", desc);
	pil_info(desc, "Brought out of reset\n");
	place_marker("M - Modem out of reset");
	desc->modem_ssr = false;
err_auth_and_reset:
	if (ret && desc->subsys_vmid > 0) {
		pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
		mem_protect = true;
	}
err_deinit_image:
	if (ret && desc->ops->deinit_image)
		desc->ops->deinit_image(desc);
err_boot:
	if (ret && desc->proxy_unvote_irq)
		disable_irq(desc->proxy_unvote_irq);
	pil_proxy_unvote(desc, ret);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	if (ret) {
		if (priv->region) {
			if (desc->subsys_vmid > 0 && !mem_protect &&
			    hyp_assign) {
				pil_reclaim_mem(desc, priv->region_start,
						(priv->region_end -
						 priv->region_start),
						VMID_HLOS);
			}
			if (desc->clear_fw_region && priv->region_start)
				pil_clear_segment(desc);
			dma_free_attrs(desc->dev, priv->region_size,
				       priv->region, priv->region_start,
				       desc->attrs);
			priv->region = NULL;
		}
		pil_release_mmap(desc);
		pil_notify_aop(desc, "off");
	}
	return ret;
}
EXPORT_SYMBOL(pil_boot);

/**
 * pil_shutdown() - Shutdown a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_shutdown(struct pil_desc *desc)
{
	int ret;
	struct pil_priv *priv = desc->priv;

	if (desc->ops->shutdown) {
		if (desc->ops->shutdown(desc))
			desc->shutdown_fail = true;
		else
			desc->shutdown_fail = false;
	}

	if (desc->proxy_unvote_irq) {
		disable_irq(desc->proxy_unvote_irq);
		if (!desc->priv->unvoted_flag)
			pil_proxy_unvote(desc, 1);
	} else if (!proxy_timeout_ms)
		pil_proxy_unvote(desc, 1);
	else
		flush_delayed_work(&priv->proxy);
	ret = pil_notify_aop(desc, "off");
	if (ret < 0)
		pr_warn("pil: failed to send OFF message to AOP rc:%d\n", ret);
	desc->modem_ssr = true;
}
EXPORT_SYMBOL(pil_shutdown);

/**
 * pil_free_memory() - Free memory resources associated with a peripheral
 * @desc: descriptor from pil_desc_init()
 */
void pil_free_memory(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv->region) {
		if (desc->subsys_vmid > 0)
			pil_assign_mem_to_linux(desc, priv->region_start,
				(priv->region_end - priv->region_start));
		dma_free_attrs(desc->dev, priv->region_size,
			       priv->region, priv->region_start, desc->attrs);
		priv->region = NULL;
	}
}
EXPORT_SYMBOL(pil_free_memory);

static DEFINE_IDA(pil_ida);

bool is_timeout_disabled(void)
{
	return disable_timeouts;
}

/**
 * pil_desc_init() - Initialize a pil descriptor
 * @desc: descriptor to initialize
 *
 * Initialize a pil descriptor for use by other pil functions. This function
 * must be called before calling pil_boot() or pil_shutdown().
 *
 * Returns 0 for success and -ERROR on failure.
 */
int pil_desc_init(struct pil_desc *desc)
{
	struct pil_priv *priv;
	void __iomem *addr;
	void *ss_toc_addr;
	int ret;
	char buf[sizeof(priv->info->name)];
	struct device_node *ofnode = desc->dev->of_node;

	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
		 "Invalid proxy voting. Ignoring\n"))
		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	desc->priv = priv;
	priv->desc = desc;

	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
	if (priv->id < 0)
		goto err;

	if (pil_info_base) {
		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
		priv->info = (struct pil_image_info __iomem *)addr;

		strlcpy(buf, desc->name, sizeof(buf));
		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
	}
	if (of_property_read_u32(ofnode, "qcom,minidump-id",
				 &desc->minidump_id))
		pr_err("minidump-id not found for %s\n", desc->name);
	else {
		if (g_md_toc && g_md_toc->md_toc_init == true) {
			ss_toc_addr = &g_md_toc->md_ss_toc[desc->minidump_id];
			pr_debug("Minidump : ss_toc_addr for ss is %pa and desc->minidump_id is %d\n",
				 &ss_toc_addr, desc->minidump_id);
			memcpy(&desc->minidump_ss, &ss_toc_addr,
			       sizeof(ss_toc_addr));
			ss_toc_addr =
				&g_md_toc->md_ss_toc[desc->minidump_id + 1];
			pr_debug("Minidump : ss_toc_addr for pdr is %pa and desc->minidump_id is %d\n",
				 &ss_toc_addr, desc->minidump_id);
			memcpy(&desc->minidump_pdr, &ss_toc_addr,
			       sizeof(ss_toc_addr));
		}
	}

	ret = pil_parse_devicetree(desc);
	if (ret)
		goto err_parse_dt;

	/* Ignore users who don't make any sense */
	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
	     && !desc->proxy_timeout,
	     "Invalid proxy unvote callback or a proxy timeout of 0 was specified or no proxy unvote IRQ was specified.\n");

	if (desc->proxy_unvote_irq) {
		ret = request_threaded_irq(desc->proxy_unvote_irq,
					   NULL,
					   proxy_unvote_intr_handler,
					   IRQF_ONESHOT | IRQF_TRIGGER_RISING,
					   desc->name, desc);
		if (ret < 0) {
			dev_err(desc->dev,
				"Unable to request proxy unvote IRQ: %d\n",
				ret);
			goto err;
		}
		disable_irq(desc->proxy_unvote_irq);
	}

	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
	wakeup_source_init(&priv->ws, priv->wname);
	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
	INIT_LIST_HEAD(&priv->segs);

	/* Make sure mapping functions are set. */
	if (!desc->map_fw_mem)
		desc->map_fw_mem = map_fw_mem;

	if (!desc->unmap_fw_mem)
		desc->unmap_fw_mem = unmap_fw_mem;

	return 0;
err_parse_dt:
	ida_simple_remove(&pil_ida, priv->id);
err:
	kfree(priv);
	return ret;
}
EXPORT_SYMBOL(pil_desc_init);

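/*
 * Typical lifecycle, as a sketch only (drv is a hypothetical driver
 * structure embedding a struct pil_desc; error handling elided):
 *
 *	ret = pil_desc_init(&drv->desc);
 *	ret = pil_boot(&drv->desc);
 *	...
 *	pil_shutdown(&drv->desc);
 *	pil_free_memory(&drv->desc);
 *	pil_desc_release(&drv->desc);
 */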
/**
 * pil_desc_release() - Release a pil descriptor
 * @desc: descriptor to free
 */
void pil_desc_release(struct pil_desc *desc)
{
	struct pil_priv *priv = desc->priv;

	if (priv) {
		ida_simple_remove(&pil_ida, priv->id);
		flush_delayed_work(&priv->proxy);
		wakeup_source_trash(&priv->ws);
	}
	desc->priv = NULL;
	kfree(priv);
}
EXPORT_SYMBOL(pil_desc_release);

static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&pil_pm_rwsem);
		break;
	case PM_POST_SUSPEND:
		up_write(&pil_pm_rwsem);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block pil_pm_notifier = {
	.notifier_call = pil_pm_notify,
};

static int __init msm_pil_init(void)
{
	struct device_node *np;
	struct resource res;
	int i;
	unsigned int size;

	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
	if (!np) {
		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
		goto out;
	}
	if (of_address_to_resource(np, 0, &res)) {
		pr_warn("pil: address to resource on imem region failed\n");
		goto out;
	}
	pil_info_base = ioremap(res.start, resource_size(&res));
	if (!pil_info_base) {
		pr_warn("pil: could not map imem region\n");
		goto out;
	}
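	/* 0x53444247 is ASCII "SDBG": a debug cookie in IMEM that disables PIL timeouts */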
	if (__raw_readl(pil_info_base) == 0x53444247) {
		pr_info("pil: pil-imem set to disable pil timeouts\n");
		disable_timeouts = true;
	}
	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));

	/* Get the global minidump ToC */
	g_md_toc = smem_get_entry(SBL_MINIDUMP_SMEM_ID, &size, 0,
				  SMEM_ANY_HOST_FLAG);
	pr_debug("Minidump: g_md_toc is %pa\n", &g_md_toc);
	if (PTR_ERR(g_md_toc) == -EPROBE_DEFER) {
		pr_err("SMEM is not initialized.\n");
		return -EPROBE_DEFER;
	}

	pil_wq = alloc_workqueue("pil_workqueue", WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!pil_wq)
		pr_warn("pil: Defaulting to sequential firmware loading.\n");

out:
	return register_pm_notifier(&pil_pm_notifier);
}
device_initcall(msm_pil_init);

static void __exit msm_pil_exit(void)
{
	if (pil_wq)
		destroy_workqueue(pil_wq);
	unregister_pm_notifier(&pil_pm_notifier);
	if (pil_info_base)
		iounmap(pil_info_base);
}
module_exit(msm_pil_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");