/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 * Mark Grosen <mgrosen@ti.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Suman Anna <s-anna@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/klist.h>
#include <linux/elf.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>

#include "remoteproc_internal.h"

static void klist_rproc_get(struct klist_node *n);
static void klist_rproc_put(struct klist_node *n);

/*
 * klist of the available remote processors.
 *
 * We need this in order to support name-based lookups (needed by
 * rproc_get_by_name()).
 *
 * That said, we don't use rproc_get_by_name() anymore within the rpmsg
 * framework. The use cases that do require its existence should be
 * scrutinized, and hopefully migrated to rproc_boot() using device-based
 * binding.
 *
 * If/when this materializes, we could drop the klist (and the by_name
 * API).
 */
static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put);

typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
				struct fw_resource *rsc, int len);
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				struct fw_resource *rsc);

/*
 * This is the IOMMU fault handler we register with the IOMMU API
 * (when relevant; not all remote processors access memory through
 * an IOMMU).
 *
 * The IOMMU core will invoke this handler whenever the remote processor
 * tries to access an unmapped device address.
 *
 * Currently this is mostly a stub, but it will later be used to trigger
 * the recovery of the remote processor.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags)
{
	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just plan to use this as a recovery trigger.
	 */
	return -ENOSYS;
}

static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev;
	int ret;

	/*
	 * We currently use iommu_present() to decide if an IOMMU
	 * setup is needed.
	 *
	 * This works for simple cases, but will easily fail with
	 * platforms that do have an IOMMU, but not for this specific
	 * rproc.
	 *
	 * This will be easily solved by introducing hw capabilities
	 * that will be set by the remoteproc driver.
	 */
	if (!iommu_present(dev->bus)) {
		dev_dbg(dev, "iommu not found\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	iommu_set_fault_handler(domain, rproc_iommu_fault);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}

static void rproc_disable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain = rproc->domain;
	struct device *dev = rproc->dev;

	if (!domain)
		return;

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);

	return;
}

/*
 * Some remote processors will ask us to allocate them physically contiguous
 * memory regions (which we call "carveouts"), and map them to specific
 * device addresses (which are hardcoded in the firmware).
 *
 * They may then ask us to copy objects into specific device addresses (e.g.
 * code/data sections) or expose to us certain symbols at other device
 * addresses (e.g. their trace buffer).
 *
 * This function is an internal helper with which we can go over the allocated
 * carveouts and translate specific device addresses to kernel virtual
 * addresses so we can access the referenced memory.
 *
 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
 * but only on kernel direct mapped RAM memory. Instead, we're just using
 * here the output of the DMA API, which should be more correct.
 */
static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		int offset = da - carveout->da;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		ptr = carveout->va + offset;

		break;
	}

	return ptr;
}
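
/*
 * Illustrative note (not part of the original sources; the numbers are
 * hypothetical): given a carveout with da 0x78800000, len 0x100000 and
 * kernel address va0, a lookup of da 0x78801000 with len 0x2000 lands
 * inside that carveout (offset 0x1000, and 0x1000 + 0x2000 <= 0x100000),
 * so rproc_da_to_va() would return va0 + 0x1000. A da that falls outside
 * every registered carveout yields NULL.
 */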

/**
 * rproc_load_segments() - load firmware segments to memory
 * @rproc: remote processor which will be booted using these fw segments
 * @elf_data: the content of the ELF firmware image
 * @len: firmware size (in bytes)
 *
 * This function loads the firmware segments to memory, where the remote
 * processor expects them.
 *
 * Some remote processors will expect their code and data to be placed
 * in specific device addresses, and can't have them dynamically assigned.
 *
 * We currently support only that kind of remote processor, and expect
 * the program header's paddr member to contain those addresses. We then go
 * through the physically contiguous "carveout" memory regions which we
 * allocated (and mapped) earlier on behalf of the remote processor,
 * and "translate" device addresses to kernel addresses, so we can copy the
 * segments where they are expected.
 *
 * Currently we only support remote processors that require carveout
 * allocations and get them mapped onto their iommus. Some processors
 * might be different: they might not have iommus, and would prefer to
 * directly allocate memory for every segment/resource. This is not yet
 * supported, though.
 */
static int
rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
{
	struct device *dev = rproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		u32 offset = phdr->p_offset;
		void *ptr;

		if (phdr->p_type != PT_LOAD)
			continue;

		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
					phdr->p_type, da, memsz, filesz);

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
							filesz, memsz);
			ret = -EINVAL;
			break;
		}

		if (offset + filesz > len) {
			dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
					offset + filesz, len);
			ret = -EINVAL;
			break;
		}

		/* grab the kernel address for this device address */
		ptr = rproc_da_to_va(rproc, da, memsz);
		if (!ptr) {
			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
			ret = -EINVAL;
			break;
		}

		/* put the segment where the remote processor expects it */
		if (phdr->p_filesz)
			memcpy(ptr, elf_data + phdr->p_offset, filesz);

		/*
		 * Zero out remaining memory for this segment.
		 *
		 * This isn't strictly required since dma_alloc_coherent already
		 * did this for us. Albeit harmless, we may consider removing
		 * this.
		 */
		if (memsz > filesz)
			memset(ptr + filesz, 0, memsz - filesz);
	}

	return ret;
}

/**
 * rproc_handle_virtio_hdr() - handle a virtio header resource
 * @rproc: the remote processor
 * @rsc: the resource descriptor
 *
 * The existence of this virtio hdr resource entry means that the firmware
 * of this @rproc supports this virtio device.
 *
 * Currently we support only a single virtio device of type VIRTIO_ID_RPMSG,
 * but the plan is to remove this limitation and support any number
 * of virtio devices (and of any type). We'll also add support for dynamically
 * adding (and removing) virtio devices over the rpmsg bus, but small
 * firmwares that don't want to get involved with rpmsg will be able
 * to simply use the resource table for this.
 *
 * At this point this virtio header entry is rather simple: it just
 * announces the virtio device id and the supported virtio device features.
 * The plan though is to extend this to include the vring information and
 * the virtio config space, too (but first, some resource table overhaul
 * is needed: move from fixed-sized to variable-length TLV entries).
 *
 * For now, the 'flags' member of the resource entry contains the virtio
 * device id, the 'da' member contains the device features, and 'pa' is
 * where we need to store the guest features once negotiation completes.
 * As usual, the 'id' member of this resource contains the index of this
 * resource type (i.e. is this the first virtio hdr entry, the 2nd, ...).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_virtio_hdr(struct rproc *rproc, struct fw_resource *rsc)
{
	struct rproc_vdev *rvdev;

	/* we only support VIRTIO_ID_RPMSG devices for now */
	if (rsc->flags != VIRTIO_ID_RPMSG) {
		dev_warn(rproc->dev, "unsupported vdev: %d\n", rsc->flags);
		return -EINVAL;
	}

	/* we only support a single vdev per rproc for now */
	if (rsc->id || rproc->rvdev) {
		dev_warn(rproc->dev, "redundant vdev entry: %s\n", rsc->name);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	/* remember the device features */
	rvdev->dfeatures = rsc->da;

	rproc->rvdev = rvdev;
	rvdev->rproc = rproc;

	return 0;
}

/**
 * rproc_handle_vring() - handle a vring fw resource
 * @rproc: the remote processor
 * @rsc: the vring resource descriptor
 *
 * This resource entry requires allocation of non-cacheable memory
 * for a virtio vring. Currently we only support two vrings per remote
 * processor, required for the virtio rpmsg device.
 *
 * The 'len' member of @rsc should contain the number of buffers this vring
 * supports and 'da' should either contain the device address where
 * the remote processor is expecting the vring, or indicate that
 * dynamic allocation of the vring's device address is supported.
 *
 * Note: 'da' is currently not handled. This will be revised when the generic
 * iommu-based DMA API arrives, or a dynamic & non-iommu use case shows
 * up. Meanwhile, statically-addressed iommu-based images should use
 * RSC_DEVMEM resource entries to map their required 'da' to the physical
 * address of their base CMA region.
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_vring(struct rproc *rproc, struct fw_resource *rsc)
{
	struct device *dev = rproc->dev;
	struct rproc_vdev *rvdev = rproc->rvdev;
	dma_addr_t dma;
	int size, id = rsc->id;
	void *va;

	/* no vdev is in place ? */
	if (!rvdev) {
		dev_err(dev, "vring requested without a virtio dev entry\n");
		return -EINVAL;
	}

	/* the firmware must provide the expected queue size */
	if (!rsc->len) {
		dev_err(dev, "missing expected queue size\n");
		return -EINVAL;
	}

	/* we currently support two vrings per rproc (for rx and tx) */
	if (id >= ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "%s: invalid vring id %d\n", rsc->name, id);
		return -EINVAL;
	}

	/* have we already allocated this vring id ? */
	if (rvdev->vring[id].len) {
		dev_err(dev, "%s: duplicated id %d\n", rsc->name, id);
		return -EINVAL;
	}

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rsc->len, AMP_VRING_ALIGN));

	/*
	 * Allocate non-cacheable memory for the vring. In the future
	 * this call will also configure the IOMMU for us
	 */
	va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev, "dma_alloc_coherent failed\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "vring%d: va %p dma %x qsz %d ring size %x\n", id, va,
					dma, rsc->len, size);

	rvdev->vring[id].len = rsc->len;
	rvdev->vring[id].va = va;
	rvdev->vring[id].dma = dma;

	return 0;
}

/**
 * rproc_handle_trace() - handle a shared trace buffer resource
 * @rproc: the remote processor
 * @rsc: the trace resource descriptor
 *
 * In case the remote processor dumps trace logs into memory,
 * export it via debugfs.
 *
 * Currently, the 'da' member of @rsc should contain the device address
 * where the remote processor is dumping the traces. Later we could also
 * support dynamically allocating this address using the generic
 * DMA API (but currently there isn't a use case for that).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_resource *rsc)
{
	struct rproc_mem_entry *trace;
	struct device *dev = rproc->dev;
	void *ptr;
	char name[15];

	/* what's the kernel address of this resource ? */
	ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
	if (!ptr) {
		dev_err(dev, "erroneous trace resource entry\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "kzalloc trace failed\n");
		return -ENOMEM;
	}

	/* set the trace buffer dma properties */
	trace->len = rsc->len;
	trace->va = ptr;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->priv = rproc_create_trace_file(name, rproc, trace);
	if (!trace->priv) {
		trace->va = NULL;
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: va %p, da 0x%llx, len 0x%x\n", name, ptr,
						rsc->da, rsc->len);

	return 0;
}

/**
 * rproc_handle_devmem() - handle devmem resource entry
 * @rproc: remote processor handle
 * @rsc: the devmem resource entry
 *
 * Remote processors commonly need to access certain on-chip peripherals.
 *
 * Some of these remote processors access memory via an iommu device,
 * and might require us to configure their iommu before they can access
 * the on-chip peripherals they need.
 *
 * This resource entry is a request to map such a peripheral device.
 *
 * These devmem entries will contain the physical address of the device in
 * the 'pa' member. If a specific device address is expected, then 'da' will
 * contain it (currently this is the only use case supported). 'len' will
 * contain the size of the physical region we need to map.
 *
 * Currently we just "trust" those devmem entries to contain valid physical
 * addresses, but this is going to change: we want the implementations to
 * tell us ranges of physical addresses the firmware is allowed to request,
 * and not allow firmwares to request access to physical addresses that
 * are outside those ranges.
 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_resource *rsc)
{
	struct rproc_mem_entry *mapping;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		dev_err(rproc->dev, "kzalloc mapping failed\n");
		return -ENOMEM;
	}

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(rproc->dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(rproc->dev, "mapped devmem pa 0x%llx, da 0x%llx, len 0x%x\n",
					rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}

/**
 * rproc_handle_carveout() - handle phys contig memory allocation requests
 * @rproc: rproc handle
 * @rsc: the resource entry
 *
 * This function will handle firmware requests for allocation of physically
 * contiguous memory regions.
 *
 * These request entries should come first in the firmware's resource table,
 * as other firmware entries might request placing other data objects inside
 * these memory regions (e.g. data/code segments, trace resource entries, ...).
 *
 * Allocating memory this way helps utilizing the reserved physical memory
 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
 * pressure is important; it may have a substantial impact on performance.
 */
static int rproc_handle_carveout(struct rproc *rproc, struct fw_resource *rsc)
{
	struct rproc_mem_entry *carveout, *mapping;
	struct device *dev = rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		dev_err(dev, "kzalloc mapping failed\n");
		return -ENOMEM;
	}

	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
	if (!carveout) {
		dev_err(dev, "kzalloc carveout failed\n");
		ret = -ENOMEM;
		goto free_mapping;
	}

	va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len);
		ret = -ENOMEM;
		goto free_carv;
	}

	dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (rproc->domain) {
		ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
								rsc->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto dma_free;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = rsc->da;
		mapping->len = rsc->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%llx to 0x%x\n", rsc->da, dma);

		/*
		 * Some remote processors might need to know the pa
		 * even though they are behind an IOMMU. E.g., OMAP4's
		 * remote M3 processor needs this so it can control
		 * on-chip hardware accelerators that are not behind
		 * the IOMMU, and therefore must know the pa.
		 *
		 * Generally we don't want to expose physical addresses
		 * if we don't have to (remote processors are generally
		 * _not_ trusted), so we might want to do this only for
		 * remote processors that _must_ have this (e.g. OMAP4's
		 * dual M3 subsystem).
		 */
		rsc->pa = dma;
	}

	carveout->va = va;
	carveout->len = rsc->len;
	carveout->dma = dma;
	carveout->da = rsc->da;

	list_add_tail(&carveout->node, &rproc->carveouts);

	return 0;

dma_free:
	dma_free_coherent(dev, rsc->len, va, dma);
free_carv:
	kfree(carveout);
free_mapping:
	kfree(mapping);
	return ret;
}

/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */
static rproc_handle_resource_t rproc_handle_rsc[] = {
	[RSC_CARVEOUT] = rproc_handle_carveout,
	[RSC_DEVMEM] = rproc_handle_devmem,
	[RSC_TRACE] = rproc_handle_trace,
	[RSC_VRING] = rproc_handle_vring,
	[RSC_VIRTIO_DEV] = NULL, /* handled early upon registration */
};

/* handle firmware resource entries before booting the remote processor */
static int
rproc_handle_boot_rsc(struct rproc *rproc, struct fw_resource *rsc, int len)
{
	struct device *dev = rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0;

	for (; len >= sizeof(*rsc); rsc++, len -= sizeof(*rsc)) {
		dev_dbg(dev, "rsc: type %d, da 0x%llx, pa 0x%llx, len 0x%x, "
			"id %d, name %s, flags %x\n", rsc->type, rsc->da,
			rsc->pa, rsc->len, rsc->id, rsc->name, rsc->flags);

		if (rsc->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", rsc->type);
			continue;
		}

		handler = rproc_handle_rsc[rsc->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc);
		if (ret)
			break;
	}

	return ret;
}
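
/*
 * Illustrative sketch (not part of the original sources): a firmware's
 * ".resource_table" section is simply an array of struct fw_resource
 * entries, consumed by the loop above. Using the field conventions
 * documented in the handlers (all values below are hypothetical), such a
 * table might contain:
 *
 *	{ .type = RSC_CARVEOUT, .da = 0x78800000, .len = 0x100000,
 *	  .name = "text" },
 *	{ .type = RSC_DEVMEM, .da = 0x4a000000, .pa = 0x4a000000,
 *	  .len = 0x1000, .name = "l4-periph" },
 *	{ .type = RSC_TRACE, .da = 0x78900000, .len = 0x8000,
 *	  .name = "trace0" },
 *	{ .type = RSC_VIRTIO_DEV, .flags = VIRTIO_ID_RPMSG, .id = 0,
 *	  .name = "vdev" },
 *	{ .type = RSC_VRING, .len = 256, .id = 0, .name = "vring0" },
 *	{ .type = RSC_VRING, .len = 256, .id = 1, .name = "vring1" },
 */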

/* handle firmware resource entries while registering the remote processor */
static int
rproc_handle_virtio_rsc(struct rproc *rproc, struct fw_resource *rsc, int len)
{
	struct device *dev = rproc->dev;
	int ret = -ENODEV;

	for (; len >= sizeof(*rsc); rsc++, len -= sizeof(*rsc))
		if (rsc->type == RSC_VIRTIO_DEV) {
			dev_dbg(dev, "found vdev %d/%s features %llx\n",
					rsc->flags, rsc->name, rsc->da);
			ret = rproc_handle_virtio_hdr(rproc, rsc);
			break;
		}

	return ret;
}

/**
 * rproc_handle_resources() - find and handle the resource table
 * @rproc: the rproc handle
 * @elf_data: the content of the ELF firmware image
 * @len: firmware size (in bytes)
 * @handler: function that should be used to handle the resource table
 *
 * This function finds the resource table inside the remote processor's
 * firmware, and invokes a user-supplied handler with it (we have two
 * possible handlers: one is invoked upon registration of @rproc,
 * in order to register the supported virtio devices, and the other is
 * invoked when @rproc is actually booted).
 *
 * Currently this function fails if a resource table doesn't exist.
 * This restriction will be removed when we start supporting remote
 * processors that don't need a resource table.
 */
static int rproc_handle_resources(struct rproc *rproc, const u8 *elf_data,
				size_t len, rproc_handle_resources_t handler)
{
	struct elf32_hdr *ehdr;
	struct elf32_shdr *shdr;
	const char *name_table;
	int i, ret = -EINVAL;

	ehdr = (struct elf32_hdr *)elf_data;
	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
	name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;

	/* look for the resource table and handle it */
	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (!strcmp(name_table + shdr->sh_name, ".resource_table")) {
			struct fw_resource *table = (struct fw_resource *)
						(elf_data + shdr->sh_offset);

			if (shdr->sh_offset + shdr->sh_size > len) {
				dev_err(rproc->dev,
					"truncated fw: need 0x%x avail 0x%x\n",
					shdr->sh_offset + shdr->sh_size, len);
				ret = -EINVAL;
				/* don't hand a truncated table to the handler */
				break;
			}

			ret = handler(rproc, table, shdr->sh_size);

			break;
		}
	}

	return ret;
}

/**
 * rproc_resource_cleanup() - clean up and free all acquired resources
 * @rproc: rproc handle
 *
 * This function will free all resources acquired for @rproc, and it
 * is called when @rproc shuts down, or just failed booting.
 */
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct device *dev = rproc->dev;
	struct rproc_vdev *rvdev = rproc->rvdev;
	int i;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
		rproc_remove_trace_file(entry->priv);
		rproc->num_traces--;
		list_del(&entry->node);
		kfree(entry);
	}

	/* free the coherent memory allocated for the vrings */
	for (i = 0; rvdev && i < ARRAY_SIZE(rvdev->vring); i++) {
		int qsz = rvdev->vring[i].len;
		void *va = rvdev->vring[i].va;
		int dma = rvdev->vring[i].dma;

		/* virtqueue size is expressed in number of buffers supported */
		if (qsz) {
			/* how many bytes does this vring really occupy ? */
			int size = PAGE_ALIGN(vring_size(qsz, AMP_VRING_ALIGN));

			dma_free_coherent(rproc->dev, size, va, dma);

			rvdev->vring[i].len = 0;
		}
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		dma_free_coherent(dev, entry->len, entry->va, entry->dma);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %u/%u\n", entry->len,
								unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}
}

/* make sure this fw image is sane */
static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
	const char *name = rproc->firmware;
	struct device *dev = rproc->dev;
	struct elf32_hdr *ehdr;

	if (!fw) {
		dev_err(dev, "failed to load %s\n", name);
		return -EINVAL;
	}

	if (fw->size < sizeof(struct elf32_hdr)) {
		dev_err(dev, "Image is too small\n");
		return -EINVAL;
	}

	ehdr = (struct elf32_hdr *)fw->data;

	/* We assume the firmware has the same endianness as the host */
# ifdef __LITTLE_ENDIAN
	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
# else /* BIG ENDIAN */
	if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
# endif
		dev_err(dev, "Unsupported firmware endianness\n");
		return -EINVAL;
	}

	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
		dev_err(dev, "Image is too small\n");
		return -EINVAL;
	}

	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		dev_err(dev, "Image is corrupted (bad magic)\n");
		return -EINVAL;
	}

	if (ehdr->e_phnum == 0) {
		dev_err(dev, "No loadable segments\n");
		return -EINVAL;
	}

	if (ehdr->e_phoff > fw->size) {
		dev_err(dev, "Firmware size is too small\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * take a firmware and boot a remote processor with it.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = rproc->dev;
	const char *name = rproc->firmware;
	struct elf32_hdr *ehdr;
	int ret;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	ehdr = (struct elf32_hdr *)fw->data;

	dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	/*
	 * The ELF entry point is the rproc's boot addr (though this is not
	 * a configurable property of all remote processors: some will always
	 * boot at a specific hardcoded address).
	 */
	rproc->bootaddr = ehdr->e_entry;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, fw->data, fw->size,
						rproc_handle_boot_rsc);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up;
	}

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw->data, fw->size);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		goto clean_up;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto clean_up;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

clean_up:
	rproc_resource_cleanup(rproc);
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * take a firmware and look for virtio devices to register.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device; one other option is just to use kref here,
 * that might be cleaner).
 */
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct device *dev = rproc->dev;
	int ret;

	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* does the fw support any virtio devices ? */
	ret = rproc_handle_resources(rproc, fw->data, fw->size,
						rproc_handle_virtio_rsc);
	if (ret) {
		dev_info(dev, "No fw virtio device was found\n");
		goto out;
	}

	/* add the virtio device (currently only rpmsg vdevs are supported) */
	ret = rproc_add_rpmsg_vdev(rproc);
	if (ret)
		goto out;

out:
	if (fw)
		release_firmware(fw);
	/* allow rproc_unregister() contexts, if any, to proceed */
	complete_all(&rproc->firmware_loading_complete);
}

/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	if (ret) {
		module_put(dev->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by additional users, then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount,
 * which means that the @rproc handle stays valid even after rproc_shutdown()
 * returns, and users can still use it with a subsequent rproc_boot(), if
 * needed.
 * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
 * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
 * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
 * you acquired @rproc using rproc_get_by_name()).
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	if (!ret)
		module_put(dev->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);
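
/*
 * Illustrative usage sketch (not part of the original sources): a client
 * that already holds an rproc handle, e.g. a hypothetical 'my_rproc'
 * obtained from platform data, would pair the two calls like this:
 *
 *	int ret = rproc_boot(my_rproc);
 *	if (ret)
 *		return ret;
 *
 *	// ... communicate with the remote processor ...
 *
 *	rproc_shutdown(my_rproc);
 *
 * Each successful rproc_boot() must eventually be matched by exactly one
 * rproc_shutdown(); only the last user's shutdown actually powers the
 * remote processor off.
 */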

/**
 * rproc_release() - completely deletes the existence of a remote processor
 * @kref: the rproc's kref
 *
 * This function should _never_ be called directly.
 *
 * The only reasonable location to use it is as an argument when kref_put'ing
 * @rproc's refcount.
 *
 * This way it will be called when no one holds a valid pointer to this @rproc
 * anymore (and obviously after it is removed from the rprocs klist).
 *
 * Note: this function is not static because rproc_vdev_release() needs it when
 * it decrements @rproc's refcount.
 */
void rproc_release(struct kref *kref)
{
	struct rproc *rproc = container_of(kref, struct rproc, refcount);

	dev_info(rproc->dev, "removing %s\n", rproc->name);

	rproc_delete_debug_dir(rproc);

	/* at this point no one holds a reference to rproc anymore */
	kfree(rproc);
}

/* will be called when an rproc is added to the rprocs klist */
static void klist_rproc_get(struct klist_node *n)
{
	struct rproc *rproc = container_of(n, struct rproc, node);

	kref_get(&rproc->refcount);
}

/* will be called when an rproc is removed from the rprocs klist */
static void klist_rproc_put(struct klist_node *n)
{
	struct rproc *rproc = container_of(n, struct rproc, node);

	kref_put(&rproc->refcount, rproc_release);
}

static struct rproc *next_rproc(struct klist_iter *i)
{
	struct klist_node *n;

	n = klist_next(i);
	if (!n)
		return NULL;

	return container_of(n, struct rproc, node);
}

/**
 * rproc_get_by_name() - find a remote processor by name and boot it
 * @name: name of the remote processor
 *
 * Finds an rproc handle using the remote processor's name, and then
 * boots it. If it's already powered on, this function just returns
 * immediately (successfully).
 *
 * Returns the rproc handle on success, and NULL on failure.
 *
 * This function increments the remote processor's refcount, so always
 * use rproc_put() to decrement it back once rproc isn't needed anymore.
 *
 * Note: currently this function (and its counterpart rproc_put()) are not
 * used anymore by the rpmsg subsystem. We need to scrutinize the use cases
 * that still need them, and see if we can migrate them to use the non
 * name-based boot/shutdown interface.
 */
struct rproc *rproc_get_by_name(const char *name)
{
	struct rproc *rproc;
	struct klist_iter i;
	int ret;

	/* find the remote processor, and upref its refcount */
	klist_iter_init(&rprocs, &i);
	while ((rproc = next_rproc(&i)) != NULL)
		if (!strcmp(rproc->name, name)) {
			kref_get(&rproc->refcount);
			break;
		}
	klist_iter_exit(&i);

	/* can't find this rproc ? */
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return NULL;
	}

	ret = rproc_boot(rproc);
	if (ret < 0) {
		kref_put(&rproc->refcount, rproc_release);
		return NULL;
	}

	return rproc;
}
EXPORT_SYMBOL(rproc_get_by_name);

/**
 * rproc_put() - decrement the refcount of a remote processor, and shut it down
 * @rproc: the remote processor
 *
 * This function tries to shut down @rproc, and then decrements its
 * refcount.
 *
 * After this function returns, @rproc may _not_ be used anymore, and its
 * handle should be considered invalid.
 *
 * This function should be called _iff_ the @rproc handle was grabbed by
 * calling rproc_get_by_name().
 */
void rproc_put(struct rproc *rproc)
{
	/* try to power off the remote processor */
	rproc_shutdown(rproc);

	/* downref rproc's refcount */
	kref_put(&rproc->refcount, rproc_release);
}
EXPORT_SYMBOL(rproc_put);
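
/*
 * Illustrative usage sketch (not part of the original sources): name-based
 * lookup pairs rproc_get_by_name() with rproc_put(); the name below is
 * hypothetical:
 *
 *	struct rproc *rproc = rproc_get_by_name("omap-rproc.1");
 *	if (!rproc)
 *		return -ENODEV;
 *
 *	// ... rproc is booted and its refcount is held ...
 *
 *	rproc_put(rproc);	// shuts it down and drops the reference
 */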

/**
 * rproc_register() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers will be
 * probed.
 *
 * Currently, though, we only support a single RPMSG virtio vdev per remote
 * processor.
 */
int rproc_register(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret = 0;

	/* expose to rproc_get_by_name users */
	klist_add_tail(&rproc->node, &rprocs);

	dev_info(rproc->dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/* rproc_unregister() calls must wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio rpmsg device,
	 * which virtio features it supports, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					rproc->firmware, dev, GFP_KERNEL,
					rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
		complete_all(&rproc->firmware_loading_complete);
		klist_remove(&rproc->node);
	}

	return ret;
}
EXPORT_SYMBOL(rproc_register);

/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register
 * it yet.
 *
 * This function should be used by rproc implementations during initialization
 * of the remote processor.
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_register() to complete
 * the registration of the remote processor.
 *
 * On success the new rproc is returned, and on failure, NULL.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
				const struct rproc_ops *ops,
				const char *firmware, int len)
{
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return NULL;

	rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
	if (!rproc) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		return NULL;
	}

	rproc->dev = dev;
	rproc->name = name;
	rproc->ops = ops;
	rproc->firmware = firmware;
	rproc->priv = &rproc[1];

	atomic_set(&rproc->power, 0);

	kref_init(&rproc->refcount);

	mutex_init(&rproc->lock);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);

	rproc->state = RPROC_OFFLINE;

	return rproc;
}
EXPORT_SYMBOL(rproc_alloc);
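
/*
 * Illustrative sketch (not part of the original sources) of how a
 * platform-specific driver might use rproc_alloc()/rproc_register() and
 * their counterparts; 'my_rproc_start', 'my_rproc_stop', 'struct my_rproc'
 * and the firmware name are hypothetical:
 *
 *	static struct rproc_ops my_rproc_ops = {
 *		.start	= my_rproc_start,
 *		.stop	= my_rproc_stop,
 *	};
 *
 *	// probe:
 *	rproc = rproc_alloc(&pdev->dev, "my-rproc", &my_rproc_ops,
 *				"my-rproc-fw.elf", sizeof(struct my_rproc));
 *	if (!rproc)
 *		return -ENOMEM;
 *	ret = rproc_register(rproc);
 *	if (ret) {
 *		rproc_free(rproc);	// unrolls rproc_alloc() only
 *		return ret;
 *	}
 *
 *	// remove:
 *	rproc_unregister(rproc);	// never rproc_free() after this
 */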

/**
 * rproc_free() - free an rproc handle that was allocated by rproc_alloc
 * @rproc: the remote processor handle
 *
 * This function should _only_ be used if @rproc was only allocated,
 * but not registered yet.
 *
 * If @rproc was already successfully registered (by calling rproc_register()),
 * then use rproc_unregister() instead.
 */
void rproc_free(struct rproc *rproc)
{
	kfree(rproc);
}
EXPORT_SYMBOL(rproc_free);

/**
 * rproc_unregister() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * Unregisters a remote processor, and decrements its refcount.
 * If its refcount drops to zero, then @rproc will be freed. If not,
 * it will be freed later once the last reference is dropped.
 *
 * This function should be called when the platform-specific rproc
 * implementation decides to remove the rproc device. It should
 * _only_ be called if a previous invocation of rproc_register()
 * has completed successfully.
 *
 * After rproc_unregister() returns, @rproc is _not_ valid anymore and
 * it shouldn't be used. More specifically, don't call rproc_free()
 * or try to directly free @rproc after rproc_unregister() returns;
 * none of these are needed, and calling them is a bug.
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_unregister(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* if rproc is just being registered, wait */
	wait_for_completion(&rproc->firmware_loading_complete);

	/* was an rpmsg vdev created ? */
	if (rproc->rvdev)
		rproc_remove_rpmsg_vdev(rproc);

	klist_remove(&rproc->node);

	kref_put(&rproc->refcount, rproc_release);

	return 0;
}
EXPORT_SYMBOL(rproc_unregister);

static int __init remoteproc_init(void)
{
	rproc_init_debugfs();
	return 0;
}
module_init(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	rproc_exit_debugfs();
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");