/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>

#include <asm/mach/map.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include <asm/cacheflush.h>

#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap: the heap information structure
 * @pool: memory pool to allocate from.
 * @base: the base address of the memory pool.
 * @permission_type: Identifier for the memory used by SCM for protecting
 *                   and unprotecting memory.
 * @secure_base: Base address used when securing a heap that is shared.
 * @secure_size: Size used when securing a heap that is shared.
 * @lock: mutex to protect shared access.
 * @heap_protected: Indicates whether heap has been protected or not.
 * @allocated_bytes: the total number of allocated bytes from the pool.
 * @total_size: the total size of the memory pool.
 * @heap_request_region: function pointer to call when first mapping of
 *                       memory occurs.
 * @heap_release_region: function pointer to call when last mapping of
 *                       memory is unmapped.
 * @bus_id: token used with request/release region.
 * @kmap_cached_count: the total number of times this heap has been mapped
 *                     in kernel space (cached).
 * @kmap_uncached_count: the total number of times this heap has been mapped
 *                       in kernel space (un-cached).
 * @umap_count: the total number of times this heap has been mapped in
 *              user space.
 * @iommu_iova: saved iova when mapping full heap at once.
 * @iommu_partition: partition used to map full heap.
 * @reusable: indicates if the memory should be reused via fmem.
 * @reserved_vrange: reserved virtual address range for use with fmem.
 * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @protect_cnt: reference count of outstanding protect requests on the heap.
 * @cpu_addr: kernel address of the CMA backing memory, once allocated.
 * @heap_size: size of the heap, used when allocating the CMA backing.
 * @handle: DMA handle of the CMA backing memory.
 * @cma: set to 1 if the heap is backed by CMA, 0 otherwise.
 * @disallow_non_secure_allocation: set to 1 if only secure allocations may
 *                                  be made from this heap.
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int disallow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

struct ion_cp_buffer {
	phys_addr_t buffer;
	atomic_t secure_cnt;
	int is_secure;
	int want_delayed_unsecure;
	/*
	 * Currently all user/kernel mapping is protected by the heap lock.
	 * This is sufficient to protect the map count as well. The lock
	 * should be used to protect map_cnt if the whole heap lock is
	 * ever removed.
	 */
	atomic_t map_cnt;
	/*
	 * protects secure_cnt for securing.
	 */
	struct mutex lock;
	int version;
	void *data;
};

#define DMA_ALLOC_TRIES	5

static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data);

static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data);

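/*
 * For CMA-backed heaps no carveout is reserved up front: the backing
 * memory is allocated here when the heap first goes into use (via
 * ion_on_first_alloc()) and is released again in free_heap_memory()
 * once the heap is empty. DMA_ATTR_NO_KERNEL_MAPPING avoids creating a
 * kernel mapping for the whole region at allocation time.
 */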
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr)
			msleep(20);
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = 0;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

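/*
 * First-allocation / last-free hooks: reusable (fmem) heaps are moved to
 * C state while any allocation exists and back to T state when the heap
 * empties; CMA-backed heaps allocate and release their backing memory at
 * the same points.
 */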
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/* Must be protected by ion_cp_buffer lock */
static int __ion_cp_protect_buffer(struct ion_buffer *buffer, int version,
					void *data, int flags)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int ret_value = 0;

	if (atomic_inc_return(&buf->secure_cnt) == 1) {
		ret_value = ion_cp_protect_mem(buf->buffer,
				buffer->size, 0,
				version, data);

		if (ret_value) {
			pr_err("Failed to secure buffer %p, error %d\n",
				buffer, ret_value);
			atomic_dec(&buf->secure_cnt);
		} else {
			pr_debug("Protected buffer %p from %x-%x\n",
				buffer, buf->buffer,
				buf->buffer + buffer->size);
			buf->want_delayed_unsecure |=
				flags & ION_UNSECURE_DELAYED ? 1 : 0;
			buf->data = data;
			buf->version = version;
		}
	}
	pr_debug("buffer %p protect count %d\n", buffer,
		atomic_read(&buf->secure_cnt));
	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
	return ret_value;
}

/* Must be protected by ion_cp_buffer lock */
static int __ion_cp_unprotect_buffer(struct ion_buffer *buffer, int version,
					void *data, int force_unsecure)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int ret_value = 0;

	if (force_unsecure) {
		if (!buf->is_secure || atomic_read(&buf->secure_cnt) == 0)
			return 0;

		if (atomic_read(&buf->secure_cnt) != 1) {
			WARN(1, "Forcing unsecure of buffer with outstanding secure count %d!\n",
				atomic_read(&buf->secure_cnt));
			atomic_set(&buf->secure_cnt, 1);
		}
	}

	if (atomic_dec_and_test(&buf->secure_cnt)) {
		ret_value = ion_cp_unprotect_mem(
			buf->buffer, buffer->size,
			0, version, data);

		if (ret_value) {
			pr_err("Failed to unsecure buffer %p, error %d\n",
				buffer, ret_value);
			/*
			 * If the force unsecure is happening, the buffer
			 * is being destroyed. We failed to unsecure the
			 * buffer even though the memory is given back.
			 * Just die now rather than discovering later what
			 * happens when trying to use the secured memory as
			 * unsecured...
			 */
			BUG_ON(force_unsecure);
			/* Bump the count back up one to try again later */
			atomic_inc(&buf->secure_cnt);
		} else {
			buf->version = -1;
			buf->data = NULL;
		}
	}
	pr_debug("buffer %p unprotect count %d\n", buffer,
		atomic_read(&buf->secure_cnt));
	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
	return ret_value;
}

int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
				int flags)
{
	int ret_value;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	if (!buf->is_secure) {
		pr_err("%s: buffer %p was not allocated as secure\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: buffer %p was allocated as cached\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->map_cnt)) {
		pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d",
			__func__, buffer, atomic_read(&buf->map_cnt));
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->secure_cnt)) {
		if (buf->version != version || buf->data != data) {
			pr_err("%s: Trying to re-secure buffer with different values",
				__func__);
			pr_err("Last secured version: %d Current %d\n",
				buf->version, version);
			pr_err("Last secured data: %p current %p\n",
				buf->data, data);
			ret_value = -EINVAL;
			goto out_unlock;
		}
	}
	ret_value = __ion_cp_protect_buffer(buffer, version, data, flags);

out_unlock:
	mutex_unlock(&buf->lock);
	return ret_value;
}

int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure)
{
	int ret_value = 0;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	ret_value = __ion_cp_unprotect_buffer(buffer, buf->version, buf->data,
		force_unsecure);
	mutex_unlock(&buf->lock);
	return ret_value;
}

/**
 * Protects the heap range if it is not already protected. Also ensures
 * that we are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap))
				goto out;

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%lx\n",
				heap->name, cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects the heap range once the last protect reference is dropped.
 * Also ensures that we are in the correct FMEM state if this heap is a
 * reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				unsigned long size,
				unsigned long align,
				unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
			heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!secure_allocation && cp_heap->disallow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
			struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

static int ion_cp_heap_allocate(struct ion_heap *heap,
				struct ion_buffer *buffer,
				unsigned long size, unsigned long align,
				unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		/* Don't leak the bookkeeping struct on allocation failure. */
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M)) {
		int n_chunks;
		int i;
		struct scatterlist *sg;

		/* Count number of 1MB chunks. Alignment is already checked. */
		n_chunks = buffer->size >> 20;

		ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
		if (ret)
			goto err0;

		for_each_sg(table->sgl, sg, table->nents, i) {
			sg_dma_address(sg) = buf->buffer + i * SZ_1M;
			sg->length = SZ_1M;
			sg->offset = 0;
		}
	} else {
		ret = sg_alloc_table(table, 1, GFP_KERNEL);
		if (ret)
			goto err0;

		table->sgl->length = buffer->size;
		table->sgl->offset = 0;
		table->sgl->dma_address = buf->buffer;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = 0;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

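/*
 * Kernel mappings are created differently depending on how the heap is
 * backed: fmem heaps are remapped through the reserved virtual range,
 * CMA heaps are vmap()ed from their page array, and carveout heaps are
 * simply ioremapped. Cached mappings are refused while the heap is
 * protected.
 */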
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			/*
			 * If the page array cannot be allocated, fall through
			 * with ret_value == NULL so the region is released
			 * below.
			 */
			if (pages) {
				for (i = 0; i < npages; i++) {
					pages[i] = phys_to_page(buf->buffer +
							i * PAGE_SIZE);
				}
				ret_value = vmap(pages, npages, VM_IOREMAP,
						pgprot);
				vfree(pages);
			}
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}

	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buf->buffer + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base+size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
					   "FREE", last_end, data->addr-1,
					   data->addr-last_end,
					   data->addr-last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
				   client_name, data->addr,
				   data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end+1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end-1, end-last_end, end-last_end);
		}
	}

	return 0;
}

int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: "
		       "User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova;

		ret_value = msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K,
						&temp_iova);

		if (ret_value) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					page_size, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}

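/*
 * Maps a buffer into an IOMMU domain. Three cases are handled: the whole
 * heap has already been mapped into this domain (just offset into the
 * existing mapping), the heap is configured with iommu_map_all (map the
 * whole heap now and reuse that mapping for later buffers), or fall back
 * to mapping only this buffer's scatterlist.
 */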
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buf->buffer;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buf->buffer - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buf->buffer - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * clear delayed map flag so that we don't interfere
			 * with this feature (we are already delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/* If we are mapping everything we'll wait to unmap until everything
	   is freed. */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

	return;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.cache_op = ion_cp_cache_ops,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.map_iommu = ion_cp_heap_map_iommu,
	.unmap_iommu = ion_cp_heap_unmap_iommu,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->iommu_map_all =
				extra_data->iommu_map_all;
		cp_heap->iommu_2x_map_domain =
				extra_data->iommu_2x_map_domain;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->disallow_non_secure_allocation =
			extra_data->no_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = 0;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
				heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
	cp_heap = NULL;
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}

/* SCM related code for locking down memory for content protection */

#define SCM_CP_LOCK_CMD_ID	0x1
#define SCM_CP_PROTECT		0x1
#define SCM_CP_UNPROTECT	0x0

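/*
 * Two SCM interfaces are supported: the v1 interface locks or unlocks a
 * single [start, end) range described by a cp_lock_msg, while the v2
 * interface passes a list of 1MB chunk addresses to
 * ion_cp_change_chunks_state(). ion_cp_protect_mem() and
 * ion_cp_unprotect_mem() below dispatch on the heap's content-protection
 * version.
 */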
struct cp_lock_msg {
	unsigned int start;
	unsigned int end;
	unsigned int permission_type;
	unsigned char lock;
} __attribute__ ((__packed__));

static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
			unsigned int permission_type)
{
	struct cp_lock_msg cmd;
	cmd.start = phy_base;
	cmd.end = phy_base + size;
	cmd.permission_type = permission_type;
	cmd.lock = SCM_CP_PROTECT;

	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
			&cmd, sizeof(cmd), NULL, 0);
}

static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
			unsigned int permission_type)
{
	struct cp_lock_msg cmd;
	cmd.start = phy_base;
	cmd.end = phy_base + size;
	cmd.permission_type = permission_type;
	cmd.lock = SCM_CP_UNPROTECT;

	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
			&cmd, sizeof(cmd), NULL, 0);
}

#define V2_CHUNK_SIZE	SZ_1M

static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
			void *data, int lock)
{
	enum cp_mem_usage usage = (enum cp_mem_usage) data;
	unsigned long *chunk_list;
	int nchunks;
	int ret;
	int i;

	if (usage < 0 || usage >= MAX_USAGE)
		return -EINVAL;

	if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
		pr_err("%s: heap size is not aligned to %x\n",
			__func__, V2_CHUNK_SIZE);
		return -EINVAL;
	}

	nchunks = size / V2_CHUNK_SIZE;

	chunk_list = allocate_contiguous_ebi(sizeof(unsigned long)*nchunks,
						SZ_4K, 0);
	if (!chunk_list)
		return -ENOMEM;

	for (i = 0; i < nchunks; i++)
		chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;

	ret = ion_cp_change_chunks_state(memory_pool_node_paddr(chunk_list),
					nchunks, V2_CHUNK_SIZE, usage, lock);

	free_contiguous_memory(chunk_list);
	return ret;
}

static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data)
{
	switch (version) {
	case ION_CP_V1:
		return ion_cp_protect_mem_v1(phy_base, size, permission_type);
	case ION_CP_V2:
		return ion_cp_change_mem_v2(phy_base, size, data,
						SCM_CP_PROTECT);
	default:
		return -EINVAL;
	}
}

static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data)
{
	switch (version) {
	case ION_CP_V1:
		return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
	case ION_CP_V2:
		return ion_cp_change_mem_v2(phy_base, size, data,
						SCM_CP_UNPROTECT);
	default:
		return -EINVAL;
	}
}