/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/cma.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/show_mem_notifier.h>
#include <asm/cacheflush.h>
#include "../ion_priv.h"
#include "compat_msm_ion.h"
#include <soc/qcom/secure_buffer.h>

#define ION_COMPAT_STR	"qcom,msm-ion"

static struct ion_device *idev;
static int num_heaps;
static struct ion_heap **heaps;

struct ion_heap_desc {
	unsigned int id;
	enum ion_heap_type type;
	const char *name;
	unsigned int permission_type;
};

#ifdef CONFIG_OF
static struct ion_heap_desc ion_heap_meta[] = {
	{
		.id = ION_SYSTEM_HEAP_ID,
		.name = ION_SYSTEM_HEAP_NAME,
	},
	{
		.id = ION_SYSTEM_CONTIG_HEAP_ID,
		.name = ION_KMALLOC_HEAP_NAME,
	},
	{
		.id = ION_SECURE_HEAP_ID,
		.name = ION_SECURE_HEAP_NAME,
	},
	{
		.id = ION_CP_MM_HEAP_ID,
		.name = ION_MM_HEAP_NAME,
		.permission_type = IPT_TYPE_MM_CARVEOUT,
	},
	{
		.id = ION_MM_FIRMWARE_HEAP_ID,
		.name = ION_MM_FIRMWARE_HEAP_NAME,
	},
	{
		.id = ION_CP_MFC_HEAP_ID,
		.name = ION_MFC_HEAP_NAME,
		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
	},
	{
		.id = ION_SF_HEAP_ID,
		.name = ION_SF_HEAP_NAME,
	},
	{
		.id = ION_QSECOM_HEAP_ID,
		.name = ION_QSECOM_HEAP_NAME,
	},
	{
		.id = ION_SPSS_HEAP_ID,
		.name = ION_SPSS_HEAP_NAME,
	},
	{
		.id = ION_AUDIO_HEAP_ID,
		.name = ION_AUDIO_HEAP_NAME,
	},
	{
		.id = ION_PIL1_HEAP_ID,
		.name = ION_PIL1_HEAP_NAME,
	},
	{
		.id = ION_PIL2_HEAP_ID,
		.name = ION_PIL2_HEAP_NAME,
	},
	{
		.id = ION_CP_WB_HEAP_ID,
		.name = ION_WB_HEAP_NAME,
	},
	{
		.id = ION_CAMERA_HEAP_ID,
		.name = ION_CAMERA_HEAP_NAME,
	},
	{
		.id = ION_ADSP_HEAP_ID,
		.name = ION_ADSP_HEAP_NAME,
	},
	{
		.id = ION_SECURE_DISPLAY_HEAP_ID,
		.name = ION_SECURE_DISPLAY_HEAP_NAME,
	}
};
#endif

static int msm_ion_lowmem_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	show_ion_usage(idev);
	return 0;
}

static struct notifier_block msm_ion_nb = {
	.notifier_call = msm_ion_lowmem_notifier,
};

struct ion_client *msm_ion_client_create(const char *name)
{
	/*
	 * The assumption is that if there is a NULL device, the ion
	 * driver has not yet probed.
	 */
	if (!idev)
		return ERR_PTR(-EPROBE_DEFER);

	if (IS_ERR(idev))
		return (struct ion_client *)idev;

	return ion_client_create(idev, name);
}
EXPORT_SYMBOL(msm_ion_client_create);

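/*
 * Illustrative usage (not taken from this file): a kernel client would
 * typically create its ION client from a probe path and propagate
 * -EPROBE_DEFER so it is re-probed once ION has finished probing. The
 * client name "example-driver" and the calling context are assumptions
 * made only for this sketch.
 *
 *	struct ion_client *client;
 *
 *	client = msm_ion_client_create("example-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);	// may be -EPROBE_DEFER
 */
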
static int ion_no_pages_cache_ops(
			struct ion_client *client,
			struct ion_handle *handle,
			void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long size_to_vmap, total_size;
	int i, j, ret;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = 0;
	ion_phys_addr_t buff_phys_start = 0;
	size_t buf_length = 0;

	ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
	if (ret)
		return -EINVAL;

	buff_phys = buff_phys_start;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8);
		total_size = buf_length;

		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; !ptr && j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						__dma_clean_area(ptr,
								size_to_vmap);
						break;
					case ION_IOC_INV_CACHES:
						__dma_inv_area(ptr,
								size_to_vmap);
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						__dma_flush_area(ptr,
								size_to_vmap);
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
			/* force a fresh mapping for the next chunk */
			ptr = NULL;
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			__dma_clean_area(vaddr, length);
			break;
		case ION_IOC_INV_CACHES:
			__dma_inv_area(vaddr, length);
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			__dma_flush_area(vaddr, length);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

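/*
 * Apply @op to @length bytes of @page starting at @offset. Lowmem pages are
 * touched through their linear mapping; highmem pages are kmapped one page
 * at a time so a large physically contiguous range can be maintained
 * without needing a persistent mapping.
 */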
static void __do_cache_ops(struct page *page, unsigned int offset,
			   unsigned int length,
			   void (*op)(const void *, size_t))
{
	unsigned int left = length;
	unsigned long pfn;
	void *vaddr;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	page = pfn_to_page(pfn);
	offset &= ~PAGE_MASK;

	if (!PageHighMem(page)) {
		vaddr = page_address(page) + offset;
		op(vaddr, length);
		goto out;
	}

	do {
		unsigned int len;

		len = left;
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		page = pfn_to_page(pfn);
		vaddr = kmap_atomic(page);
		op(vaddr + offset, len);
		kunmap_atomic(vaddr);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);

out:
	return;
}

static int ion_pages_cache_ops(
			struct ion_client *client,
			struct ion_handle *handle,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	struct sg_table *table = NULL;
	struct scatterlist *sg;
	int i;
	unsigned int len = 0;
	void (*op)(const void *, size_t);

	table = ion_sg_table(client, handle);
	if (IS_ERR_OR_NULL(table))
		return PTR_ERR(table);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		op = __dma_clean_area;
		break;
	case ION_IOC_INV_CACHES:
		op = __dma_inv_area;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		op = __dma_flush_area;
		break;
	default:
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		unsigned int sg_offset, sg_left, size = 0;

		len += sg->length;
		if (len <= offset)
			continue;

		sg_left = len - offset;
		sg_offset = sg->length - sg_left;

		size = (length < sg_left) ? length : sg_left;

		__do_cache_ops(sg_page(sg), sg_offset, size, op);

		offset += size;
		length -= size;

		if (length == 0)
			break;
	}
	return 0;
}

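/*
 * Resolve @handle's flags and dispatch the cache maintenance request.
 * Uncached and secure buffers are silently skipped; buffers backed by
 * struct pages go through ion_pages_cache_ops(), while carveout-style
 * buffers without pages go through ion_no_pages_cache_ops().
 */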
static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			   void *uaddr, unsigned long offset, unsigned long len,
			   unsigned int cmd)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct sg_table *table;
	struct page *page;

	ret = ion_handle_get_flags(client, handle, &flags);
	if (ret)
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		return 0;

	if (flags & ION_FLAG_SECURE)
		return 0;

	table = ion_sg_table(client, handle);

	if (IS_ERR_OR_NULL(table))
		return PTR_ERR(table);

	page = sg_page(table->sgl);

	if (page)
		ret = ion_pages_cache_ops(client, handle, uaddr,
					  offset, len, cmd);
	else
		ret = ion_no_pages_cache_ops(client, handle, uaddr,
					     offset, len, cmd);

	return ret;
}

int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *vaddr, unsigned long len, unsigned int cmd)
{
	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
}
EXPORT_SYMBOL(msm_ion_do_cache_op);

int msm_ion_do_cache_offset_op(
		struct ion_client *client, struct ion_handle *handle,
		void *vaddr, unsigned int offset, unsigned long len,
		unsigned int cmd)
{
	return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
}
EXPORT_SYMBOL(msm_ion_do_cache_offset_op);

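/*
 * Illustrative usage (not taken from this file): after a CPU write to a
 * cached, non-secure buffer, a client would typically clean the range
 * before handing the buffer to a device. The client, handle, source buffer
 * and kernel mapping below are assumptions made only for this sketch.
 *
 *	void *kvaddr = ion_map_kernel(client, handle);
 *
 *	if (!IS_ERR_OR_NULL(kvaddr)) {
 *		memcpy(kvaddr, src, len);
 *		msm_ion_do_cache_op(client, handle, kvaddr, len,
 *				    ION_IOC_CLEAN_CACHES);
 *		ion_unmap_kernel(client, handle);
 *	}
 */
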
static void msm_ion_allocate(struct ion_platform_heap *heap)
{
	if (!heap->base && heap->extra_data) {
		WARN(1, "Specifying carveout heaps without a base is deprecated. Convert to the DMA heap type instead");
		return;
	}
}

#ifdef CONFIG_OF
static int msm_init_extra_data(struct device_node *node,
			       struct ion_platform_heap *heap,
			       const struct ion_heap_desc *heap_desc)
{
	int ret = 0;

	switch ((int)heap->type) {
	case ION_HEAP_TYPE_CARVEOUT:
	{
		heap->extra_data = kzalloc(sizeof(*heap->extra_data),
					   GFP_KERNEL);
		if (!heap->extra_data)
			ret = -ENOMEM;
		break;
	}
	default:
		heap->extra_data = 0;
		break;
	}
	return ret;
}

#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
			.heap_type = ION_HEAP_TYPE_##h, }

static struct heap_types_info {
	const char *name;
	int heap_type;
} heap_types_info[] = {
	MAKE_HEAP_TYPE_MAPPING(SYSTEM),
	MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
	MAKE_HEAP_TYPE_MAPPING(CHUNK),
	MAKE_HEAP_TYPE_MAPPING(DMA),
	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
};

static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
					       int *heap_type)
{
	const char *name;
	int i, ret = -EINVAL;

	ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
	if (ret)
		goto out;
	for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
		if (!strcmp(heap_types_info[i].name, name)) {
			*heap_type = heap_types_info[i].heap_type;
			ret = 0;
			goto out;
		}
	}
	WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
	     name, __FILE__);
out:
	return ret;
}

static int msm_ion_populate_heap(struct device_node *node,
				 struct ion_platform_heap *heap)
{
	unsigned int i;
	int ret = -EINVAL, heap_type = -1;
	unsigned int len = ARRAY_SIZE(ion_heap_meta);

	for (i = 0; i < len; ++i) {
		if (ion_heap_meta[i].id == heap->id) {
			heap->name = ion_heap_meta[i].name;
			ret = msm_ion_get_heap_type_from_dt_node(node,
								 &heap_type);
			if (ret)
				break;
			heap->type = heap_type;
			ret = msm_init_extra_data(node, heap,
						  &ion_heap_meta[i]);
			break;
		}
	}
	if (ret)
		pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
	return ret;
}

static void free_pdata(const struct ion_platform_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->nr; ++i)
		kfree(pdata->heaps[i].extra_data);
	kfree(pdata->heaps);
	kfree(pdata);
}

static void msm_ion_get_heap_dt_data(struct device_node *node,
				     struct ion_platform_heap *heap)
{
	struct device_node *pnode;

	pnode = of_parse_phandle(node, "memory-region", 0);
	if (pnode) {
		const __be32 *basep;
		u64 size;
		u64 base;

		basep = of_get_address(pnode, 0, &size, NULL);
		if (!basep) {
			base = cma_get_base(dev_get_cma_area(heap->priv));
			size = cma_get_size(dev_get_cma_area(heap->priv));
		} else {
			base = of_translate_address(pnode, basep);
			WARN(base == OF_BAD_ADDR, "Failed to parse DT node for heap %s\n",
			     heap->name);
		}
		heap->base = base;
		heap->size = size;
		of_node_put(pnode);
	}
}

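/*
 * Illustrative devicetree fragment (an assumption, not copied from any
 * shipping board file) showing the shape the parser below expects: a
 * "qcom,msm-ion" parent with one child node per heap, where "reg" carries
 * the heap id, "qcom,ion-heap-type" names an entry in heap_types_info, and
 * an optional "memory-region" phandle supplies a carveout/CMA region. The
 * heap ids and the &adsp_mem phandle are placeholders.
 *
 *	qcom,ion {
 *		compatible = "qcom,msm-ion";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		qcom,ion-heap@25 {
 *			reg = <25>;
 *			qcom,ion-heap-type = "SYSTEM";
 *		};
 *
 *		qcom,ion-heap@22 {
 *			reg = <22>;
 *			memory-region = <&adsp_mem>;
 *			qcom,ion-heap-type = "DMA";
 *		};
 *	};
 */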
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
{
	struct ion_platform_data *pdata = 0;
	struct ion_platform_heap *heaps = NULL;
	struct device_node *node;
	struct platform_device *new_dev = NULL;
	const struct device_node *dt_node = pdev->dev.of_node;
	const __be32 *val;
	int ret = -EINVAL;
	u32 num_heaps = 0;
	int idx = 0;

	for_each_available_child_of_node(dt_node, node)
		num_heaps++;

	if (!num_heaps)
		return ERR_PTR(-EINVAL);

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	heaps = kcalloc(num_heaps, sizeof(struct ion_platform_heap),
			GFP_KERNEL);
	if (!heaps) {
		kfree(pdata);
		return ERR_PTR(-ENOMEM);
	}

	pdata->heaps = heaps;
	pdata->nr = num_heaps;

	for_each_available_child_of_node(dt_node, node) {
		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
		if (!new_dev) {
			pr_err("Failed to create device %s\n", node->name);
			goto free_heaps;
		}

		pdata->heaps[idx].priv = &new_dev->dev;
		val = of_get_address(node, 0, NULL, NULL);
		if (!val) {
			pr_err("%s: Unable to find reg key", __func__);
			goto free_heaps;
		}
		pdata->heaps[idx].id = (u32)of_read_number(val, 1);

		ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
		if (ret)
			goto free_heaps;

		msm_ion_get_heap_dt_data(node, &pdata->heaps[idx]);

		++idx;
	}
	return pdata;

free_heaps:
	free_pdata(pdata);
	return ERR_PTR(ret);
}
#else
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
{
	return NULL;
}

static void free_pdata(const struct ion_platform_data *pdata)
{
}
#endif

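/*
 * Return 0 if [start, end) lies entirely within a single VMA of the current
 * address space, non-zero otherwise. Note the inverted sense: callers treat
 * any non-zero return as "out of bounds".
 */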
static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out;
		if (end > vma->vm_end)
			goto out;
		ret = 0;
	}

out:
	return ret;
}

int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
{
	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
}

int ion_heap_allow_heap_secure(enum ion_heap_type type)
{
	return false;
}

bool is_secure_vmid_valid(int vmid)
{
	return (vmid == VMID_CP_TOUCH ||
		vmid == VMID_CP_BITSTREAM ||
		vmid == VMID_CP_PIXEL ||
		vmid == VMID_CP_NON_PIXEL ||
		vmid == VMID_CP_CAMERA ||
		vmid == VMID_CP_SEC_DISPLAY ||
		vmid == VMID_CP_APP ||
		vmid == VMID_CP_CAMERA_PREVIEW ||
		vmid == VMID_CP_SPSS_SP ||
		vmid == VMID_CP_SPSS_SP_SHARED ||
		vmid == VMID_CP_SPSS_HLOS_SHARED);
}

unsigned int count_set_bits(unsigned long val)
{
	return ((unsigned int)bitmap_weight(&val, BITS_PER_LONG));
}

int populate_vm_list(unsigned long flags, unsigned int *vm_list,
		     int nelems)
{
	unsigned int itr = 0;
	int vmid;

	flags = flags & ION_FLAGS_CP_MASK;
	for_each_set_bit(itr, &flags, BITS_PER_LONG) {
		vmid = get_vmid(0x1UL << itr);
		if (vmid < 0 || !nelems)
			return -EINVAL;

		vm_list[nelems - 1] = vmid;
		nelems--;
	}
	return 0;
}

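/*
 * Illustrative usage (not taken from this file): converting an allocation's
 * content-protection flags into the VMID list used when assigning a buffer
 * to secure VMs. The local variables and the two-flag mask are assumptions
 * made only for this sketch.
 *
 *	unsigned long flags = ION_FLAG_CP_PIXEL | ION_FLAG_CP_NON_PIXEL;
 *	unsigned int nr = count_set_bits(flags & ION_FLAGS_CP_MASK);
 *	unsigned int vmids[8];
 *
 *	if (nr <= ARRAY_SIZE(vmids) &&
 *	    !populate_vm_list(flags, vmids, nr)) {
 *		// vmids[0..nr-1] now holds VMID_CP_* values
 *	}
 */
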
int get_secure_vmid(unsigned long flags)
{
	if (flags & ION_FLAG_CP_TOUCH)
		return VMID_CP_TOUCH;
	if (flags & ION_FLAG_CP_BITSTREAM)
		return VMID_CP_BITSTREAM;
	if (flags & ION_FLAG_CP_PIXEL)
		return VMID_CP_PIXEL;
	if (flags & ION_FLAG_CP_NON_PIXEL)
		return VMID_CP_NON_PIXEL;
	if (flags & ION_FLAG_CP_CAMERA)
		return VMID_CP_CAMERA;
	if (flags & ION_FLAG_CP_SEC_DISPLAY)
		return VMID_CP_SEC_DISPLAY;
	if (flags & ION_FLAG_CP_APP)
		return VMID_CP_APP;
	if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
		return VMID_CP_CAMERA_PREVIEW;
	if (flags & ION_FLAG_CP_SPSS_SP)
		return VMID_CP_SPSS_SP;
	if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
		return VMID_CP_SPSS_SP_SHARED;
	if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
		return VMID_CP_SPSS_HLOS_SHARED;
	return -EINVAL;
}

int get_vmid(unsigned long flags)
{
	int vmid;

	vmid = get_secure_vmid(flags);
	if (vmid < 0) {
		if (flags & ION_FLAG_CP_HLOS)
			vmid = VMID_HLOS;
	}
	return vmid;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	case ION_IOC_PREFETCH:
	case ION_IOC_DRAIN:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

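/*
 * Illustrative userspace call path (an assumption, not part of this file):
 * the custom ioctls are issued on the ION device fd with a struct
 * ion_flush_data describing the handle or dma-buf fd, the mmap'ed address
 * and the range to maintain. The fd/pointer/length variables below are
 * placeholders.
 *
 *	struct ion_flush_data flush = {
 *		.fd = buf_fd,
 *		.vaddr = mapped_ptr,
 *		.offset = 0,
 *		.length = buf_len,
 *	};
 *
 *	ioctl(ion_fd, ION_IOC_CLEAN_INV_CACHES, &flush);
 */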
long msm_ion_custom_ioctl(struct ion_client *client,
			  unsigned int cmd,
			  unsigned long arg)
{
	unsigned int dir;
	union {
		struct ion_flush_data flush_data;
		struct ion_prefetch_data prefetch_data;
	} data;

	dir = msm_ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		unsigned long start, end;
		struct ion_handle *handle = NULL;
		int ret;
		struct mm_struct *mm = current->active_mm;

		if (data.flush_data.handle > 0) {
			handle = ion_handle_get_by_id(
					client, (int)data.flush_data.handle);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not find handle: %d\n",
					__func__, (int)data.flush_data.handle);
				return PTR_ERR(handle);
			}
		} else {
			handle = ion_import_dma_buf_fd(client,
						       data.flush_data.fd);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not import handle: %pK\n",
					__func__, handle);
				return -EINVAL;
			}
		}

		down_read(&mm->mmap_sem);

		start = (unsigned long)data.flush_data.vaddr;
		end = (unsigned long)data.flush_data.vaddr
			+ data.flush_data.length;

		if (check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %pK is out of bounds\n",
			       __func__, data.flush_data.vaddr);
			ret = -EINVAL;
		} else {
			ret = ion_do_cache_op(
				client, handle, data.flush_data.vaddr,
				data.flush_data.offset,
				data.flush_data.length, cmd);
		}
		up_read(&mm->mmap_sem);

		ion_free(client, handle);

		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_PREFETCH:
	{
		int ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SYSTEM_SECURE,
				     (void *)&data.prefetch_data,
				     ion_system_secure_heap_prefetch);
		if (ret)
			return ret;
		break;
	}
	case ION_IOC_DRAIN:
	{
		int ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SYSTEM_SECURE,
				     (void *)&data.prefetch_data,
				     ion_system_secure_heap_drain);

		if (ret)
			return ret;
		break;
	}

	default:
		return -ENOTTY;
	}
	return 0;
}

#define MAX_VMAP_RETRIES 10

/**
 * An optimized page-zero'ing function. vmaps arrays of pages in large
 * chunks to minimize the number of memsets and vmaps/vunmaps.
 *
 * Note that the `pages' array should be composed of all 4K pages.
 *
 * NOTE: This function does not guarantee synchronization of the caches
 * and thus caller is responsible for handling any cache maintenance
 * operations needed.
 */
int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
{
	int i, j, npages_to_vmap;
	void *ptr = NULL;

	/*
	 * As an optimization, we manually zero out all of the pages
	 * in one fell swoop here. To safeguard against insufficient
	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
	 * starting with a conservative estimate of 1/8 of the total
	 * number of vmalloc pages available.
	 */
	npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
			>> PAGE_SHIFT;
	for (i = 0; i < num_pages; i += npages_to_vmap) {
		npages_to_vmap = min(npages_to_vmap, num_pages - i);
		for (j = 0; !ptr && j < MAX_VMAP_RETRIES && npages_to_vmap;
		     ++j) {
			ptr = vmap(&pages[i], npages_to_vmap,
				   VM_IOREMAP, PAGE_KERNEL);
			if (!ptr)
				npages_to_vmap >>= 1;
		}
		if (!ptr)
			return -ENOMEM;

		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
		vunmap(ptr);
		/* force a fresh mapping for the next chunk */
		ptr = NULL;
	}

	return 0;
}

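/*
 * Allocate the struct page pointer table described by @pages_mem->size.
 * Small tables come straight from kmalloc(); tables larger than 8K first
 * try a no-retry kmalloc and then fall back to vmalloc(), recording the
 * matching free routine in @pages_mem->free_fn.
 */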
int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
{
	struct page **pages;
	unsigned int page_tbl_size;

	pages_mem->free_fn = kfree;
	page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
	if (page_tbl_size > SZ_8K) {
		/*
		 * Do fallback to ensure we have a balance between
		 * performance and availability.
		 */
		pages = kmalloc(page_tbl_size,
				__GFP_COMP | __GFP_NORETRY |
				__GFP_NOWARN);
		if (!pages) {
			pages = vmalloc(page_tbl_size);
			pages_mem->free_fn = vfree;
		}
	} else {
		pages = kmalloc(page_tbl_size, GFP_KERNEL);
	}

	if (!pages)
		return -ENOMEM;

	pages_mem->pages = pages;
	return 0;
}

void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
{
	pages_mem->free_fn(pages_mem->pages);
}

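/*
 * Zero a physically contiguous high-order page (and, in the sg_table helper
 * below, every page of a table) and then perform a DMA sync so the zeroed
 * contents are visible to devices. Both helpers stage the constituent 4K
 * pages in a pages_mem table and reuse msm_ion_heap_pages_zero() for the
 * actual memset.
 */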
int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
				      int order)
{
	int i, ret;
	struct pages_mem pages_mem;
	int npages = 1 << order;

	pages_mem.size = npages * PAGE_SIZE;

	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
		return -ENOMEM;

	for (i = 0; i < (1 << order); ++i)
		pages_mem.pages[i] = page + i;

	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
	dma_sync_single_for_device(dev, page_to_phys(page), pages_mem.size,
				   DMA_BIDIRECTIONAL);
	msm_ion_heap_free_pages_mem(&pages_mem);
	return ret;
}

int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *table,
			       size_t size)
{
	struct scatterlist *sg;
	int i, j, ret = 0, npages = 0;
	struct pages_mem pages_mem;

	pages_mem.size = PAGE_ALIGN(size);

	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg->length;
		/* needed to make dma_sync_sg_for_device work: */
		sg->dma_address = sg_phys(sg);

		for (j = 0; j < len / PAGE_SIZE; j++)
			pages_mem.pages[npages++] = page + j;
	}

	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
	dma_sync_sg_for_device(dev, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);
	msm_ion_heap_free_pages_mem(&pages_mem);
	return ret;
}

static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch ((int)heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_SECURE:
		heap = ion_system_secure_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_HYP_CMA:
		heap = ion_cma_secure_heap_create(heap_data);
		break;
	default:
		heap = ion_heap_create(heap_data);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       &heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	heap->priv = heap_data->priv;
	return heap;
}

static void msm_ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch ((int)heap->type) {
	case ION_HEAP_TYPE_SYSTEM_SECURE:
		ion_system_secure_heap_destroy(heap);
		break;

	case ION_HEAP_TYPE_HYP_CMA:
		ion_cma_secure_heap_destroy(heap);
		break;
	default:
		ion_heap_destroy(heap);
	}
}

struct ion_heap *get_ion_heap(int heap_id)
{
	int i;
	struct ion_heap *heap;

	for (i = 0; i < num_heaps; i++) {
		heap = heaps[i];
		if (heap->id == heap_id)
			return heap;
	}

	pr_err("%s: heap_id %d not found\n", __func__, heap_id);
	return NULL;
}

static int msm_ion_probe(struct platform_device *pdev)
{
	static struct ion_device *new_dev;
	struct ion_platform_data *pdata;
	unsigned int pdata_needs_to_be_freed;
	int err = -1;
	int i;

	if (pdev->dev.of_node) {
		pdata = msm_ion_parse_dt(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		pdata_needs_to_be_freed = 1;
	} else {
		pdata = pdev->dev.platform_data;
		pdata_needs_to_be_freed = 0;
	}

	num_heaps = pdata->nr;

	heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);

	if (!heaps) {
		err = -ENOMEM;
		goto out;
	}

	new_dev = ion_device_create(compat_msm_ion_ioctl);
	if (IS_ERR_OR_NULL(new_dev)) {
		/*
		 * set this to the ERR to indicate to the clients
		 * that Ion failed to probe.
		 */
		idev = new_dev;
		err = PTR_ERR(new_dev);
		goto out;
	}

	/* create the heaps as specified in the board file */
	for (i = 0; i < num_heaps; i++) {
		struct ion_platform_heap *heap_data = &pdata->heaps[i];

		msm_ion_allocate(heap_data);

		heap_data->has_outer_cache = pdata->has_outer_cache;
		heaps[i] = msm_ion_heap_create(heap_data);
		if (IS_ERR_OR_NULL(heaps[i])) {
			heaps[i] = 0;
			continue;
		} else {
			if (heap_data->size)
				pr_info("ION heap %s created at %pa with size %zx\n",
					heap_data->name,
					&heap_data->base,
					heap_data->size);
			else
				pr_info("ION heap %s created\n",
					heap_data->name);
		}

		ion_device_add_heap(new_dev, heaps[i]);
	}
	if (pdata_needs_to_be_freed)
		free_pdata(pdata);

	platform_set_drvdata(pdev, new_dev);
	/*
	 * intentionally set this at the very end to allow probes to be deferred
	 * completely until Ion is setup
	 */
	idev = new_dev;

	show_mem_notifier_register(&msm_ion_nb);
	return 0;

out:
	kfree(heaps);
	if (pdata_needs_to_be_freed)
		free_pdata(pdata);
	return err;
}

static int msm_ion_remove(struct platform_device *pdev)
{
	struct ion_device *idev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < num_heaps; i++)
		msm_ion_heap_destroy(heaps[i]);

	ion_device_destroy(idev);
	kfree(heaps);
	return 0;
}

static const struct of_device_id msm_ion_match_table[] = {
	{.compatible = ION_COMPAT_STR},
	{},
};

static struct platform_driver msm_ion_driver = {
	.probe = msm_ion_probe,
	.remove = msm_ion_remove,
	.driver = {
		.name = "ion-msm",
		.of_match_table = msm_ion_match_table,
	},
};

static int __init msm_ion_init(void)
{
	return platform_driver_register(&msm_ion_driver);
}

static void __exit msm_ion_exit(void)
{
	platform_driver_unregister(&msm_ion_driver);
}

subsys_initcall(msm_ion_init);
module_exit(msm_ion_exit);