/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

14#include <linux/err.h>
Laura Abbott29defcc2014-08-01 16:13:40 -070015#include <linux/io.h>
Patrick Dalyeeeb9402016-11-01 20:54:41 -070016#include <linux/msm_ion.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/of.h>
20#include <linux/of_platform.h>
21#include <linux/of_address.h>
22#include <linux/mm.h>
23#include <linux/mm_types.h>
24#include <linux/sched.h>
25#include <linux/rwsem.h>
26#include <linux/uaccess.h>
27#include <linux/memblock.h>
28#include <linux/dma-mapping.h>
29#include <linux/dma-contiguous.h>
30#include <linux/vmalloc.h>
31#include <linux/highmem.h>
32#include <linux/cma.h>
33#include <linux/module.h>
Sudarshan Rajagopalan33ae0432017-05-18 00:12:53 -070034#include <linux/bitops.h>
Laura Abbott29defcc2014-08-01 16:13:40 -070035#include <linux/show_mem_notifier.h>
Patrick Dalyeeeb9402016-11-01 20:54:41 -070036#include <asm/cacheflush.h>
37#include "../ion_priv.h"
38#include "compat_msm_ion.h"
Laura Abbott29defcc2014-08-01 16:13:40 -070039#include <soc/qcom/secure_buffer.h>
Patrick Dalyeeeb9402016-11-01 20:54:41 -070040
#define ION_COMPAT_STR	"qcom,msm-ion"

static struct ion_device *idev;
static int num_heaps;
static struct ion_heap **heaps;

struct ion_heap_desc {
	unsigned int id;
	enum ion_heap_type type;
	const char *name;
	unsigned int permission_type;
};

#ifdef CONFIG_OF
static struct ion_heap_desc ion_heap_meta[] = {
	{
		.id = ION_SYSTEM_HEAP_ID,
		.name = ION_SYSTEM_HEAP_NAME,
	},
	{
		.id = ION_SYSTEM_CONTIG_HEAP_ID,
		.name = ION_KMALLOC_HEAP_NAME,
	},
	{
		.id = ION_SECURE_HEAP_ID,
		.name = ION_SECURE_HEAP_NAME,
	},
	{
		.id = ION_CP_MM_HEAP_ID,
		.name = ION_MM_HEAP_NAME,
		.permission_type = IPT_TYPE_MM_CARVEOUT,
	},
	{
		.id = ION_MM_FIRMWARE_HEAP_ID,
		.name = ION_MM_FIRMWARE_HEAP_NAME,
	},
	{
		.id = ION_CP_MFC_HEAP_ID,
		.name = ION_MFC_HEAP_NAME,
		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
	},
	{
		.id = ION_SF_HEAP_ID,
		.name = ION_SF_HEAP_NAME,
	},
	{
		.id = ION_QSECOM_HEAP_ID,
		.name = ION_QSECOM_HEAP_NAME,
	},
	{
		.id = ION_QSECOM_TA_HEAP_ID,
		.name = ION_QSECOM_TA_HEAP_NAME,
	},
	{
		.id = ION_SPSS_HEAP_ID,
		.name = ION_SPSS_HEAP_NAME,
	},
	{
		.id = ION_AUDIO_HEAP_ID,
		.name = ION_AUDIO_HEAP_NAME,
	},
	{
		.id = ION_PIL1_HEAP_ID,
		.name = ION_PIL1_HEAP_NAME,
	},
	{
		.id = ION_PIL2_HEAP_ID,
		.name = ION_PIL2_HEAP_NAME,
	},
	{
		.id = ION_CP_WB_HEAP_ID,
		.name = ION_WB_HEAP_NAME,
	},
	{
		.id = ION_CAMERA_HEAP_ID,
		.name = ION_CAMERA_HEAP_NAME,
	},
	{
		.id = ION_ADSP_HEAP_ID,
		.name = ION_ADSP_HEAP_NAME,
	},
	{
		.id = ION_SECURE_DISPLAY_HEAP_ID,
		.name = ION_SECURE_DISPLAY_HEAP_NAME,
	}
};
#endif

static int msm_ion_lowmem_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	show_ion_usage(idev);
	return 0;
}

static struct notifier_block msm_ion_nb = {
	.notifier_call = msm_ion_lowmem_notifier,
};

struct ion_client *msm_ion_client_create(const char *name)
{
	/*
	 * The assumption is that if there is a NULL device, the ion
	 * driver has not yet probed.
	 */
	if (!idev)
		return ERR_PTR(-EPROBE_DEFER);

	if (IS_ERR(idev))
		return (struct ion_client *)idev;

	return ion_client_create(idev, name);
}
EXPORT_SYMBOL(msm_ion_client_create);

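/*
 * Illustrative caller sketch (the client name "example-client" is made up
 * for this comment): callers are expected to propagate -EPROBE_DEFER so
 * their own probe is retried once Ion has finished probing.
 *
 *	struct ion_client *client = msm_ion_client_create("example-client");
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 */
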
static int ion_no_pages_cache_ops(
			struct ion_client *client,
			struct ion_handle *handle,
			void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long size_to_vmap, total_size;
	int i, j, ret;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = 0;
	ion_phys_addr_t buff_phys_start = 0;
	size_t buf_length = 0;

	ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
	if (ret)
		return -EINVAL;

	buff_phys = buff_phys_start;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8);
		total_size = buf_length;

		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; !ptr && j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						__dma_clean_area(
							ptr,
							size_to_vmap);
						break;
					case ION_IOC_INV_CACHES:
						__dma_inv_area(
							ptr,
							size_to_vmap);
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						__dma_flush_area(
							ptr,
							size_to_vmap);
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
			/* reset so the next chunk gets a fresh mapping */
			ptr = NULL;
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			__dma_clean_area(vaddr, length);
			break;
		case ION_IOC_INV_CACHES:
			__dma_inv_area(vaddr, length);
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			__dma_flush_area(vaddr, length);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static void __do_cache_ops(struct page *page, unsigned int offset,
			   unsigned int length,
			   void (*op)(const void *, size_t))
{
	unsigned int left = length;
	unsigned long pfn;
	void *vaddr;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	page = pfn_to_page(pfn);
	offset &= ~PAGE_MASK;

	if (!PageHighMem(page)) {
		vaddr = page_address(page) + offset;
		op(vaddr, length);
		goto out;
	}

	do {
		unsigned int len;

		len = left;
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		page = pfn_to_page(pfn);
		vaddr = kmap_atomic(page);
		op(vaddr + offset, len);
		kunmap_atomic(vaddr);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);

out:
	return;
}

static int ion_pages_cache_ops(
			struct ion_client *client,
			struct ion_handle *handle,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	struct sg_table *table = NULL;
	struct scatterlist *sg;
	int i;
	unsigned int len = 0;
	void (*op)(const void *, size_t);

	table = ion_sg_table(client, handle);
	if (IS_ERR_OR_NULL(table))
		return PTR_ERR(table);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		op = __dma_clean_area;
		break;
	case ION_IOC_INV_CACHES:
		op = __dma_inv_area;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		op = __dma_flush_area;
		break;
	default:
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		unsigned int sg_offset, sg_left, size = 0;

		len += sg->length;
		if (len <= offset)
			continue;

		sg_left = len - offset;
		sg_offset = sg->length - sg_left;

		size = (length < sg_left) ? length : sg_left;

		__do_cache_ops(sg_page(sg), sg_offset, size, op);

		offset += size;
		length -= size;

		if (length == 0)
			break;
	}
	return 0;
}

static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			   void *uaddr, unsigned long offset, unsigned long len,
			   unsigned int cmd)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct sg_table *table;
	struct page *page;

	ret = ion_handle_get_flags(client, handle, &flags);
	if (ret)
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		return 0;

	if (!is_buffer_hlos_assigned(ion_handle_buffer(handle)))
		return 0;

	table = ion_sg_table(client, handle);

	if (IS_ERR_OR_NULL(table))
		return PTR_ERR(table);

	page = sg_page(table->sgl);

	if (page)
		ret = ion_pages_cache_ops(client, handle, uaddr,
					  offset, len, cmd);
	else
		ret = ion_no_pages_cache_ops(client, handle, uaddr,
					     offset, len, cmd);

	return ret;
}

int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *vaddr, unsigned long len, unsigned int cmd)
{
	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
}
EXPORT_SYMBOL(msm_ion_do_cache_op);

int msm_ion_do_cache_offset_op(
		struct ion_client *client, struct ion_handle *handle,
		void *vaddr, unsigned int offset, unsigned long len,
		unsigned int cmd)
{
	return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
}
EXPORT_SYMBOL(msm_ion_do_cache_offset_op);

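/*
 * Illustrative cache-maintenance sketch (the buffer contents and length are
 * hypothetical): a caller that dirties a cached buffer through a kernel
 * mapping and then hands it to a device would clean the CPU caches first.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		memset(vaddr, 0xA5, len);
 *		msm_ion_do_cache_op(client, handle, vaddr, len,
 *				    ION_IOC_CLEAN_CACHES);
 *		ion_unmap_kernel(client, handle);
 *	}
 */
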
static void msm_ion_allocate(struct ion_platform_heap *heap)
{
	if (!heap->base && heap->extra_data) {
		WARN(1, "Specifying carveout heaps without a base is deprecated. Convert to the DMA heap type instead");
		return;
	}
}

#ifdef CONFIG_OF
static int msm_init_extra_data(struct device_node *node,
			       struct ion_platform_heap *heap,
			       const struct ion_heap_desc *heap_desc)
{
	int ret = 0;

	switch ((int)heap->type) {
	case ION_HEAP_TYPE_CARVEOUT:
	{
		heap->extra_data = kzalloc(sizeof(*heap->extra_data),
					   GFP_KERNEL);
		if (!heap->extra_data)
			ret = -ENOMEM;
		break;
	}
	default:
		heap->extra_data = NULL;
		break;
	}
	return ret;
}

#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
				    .heap_type = ION_HEAP_TYPE_##h, }

static struct heap_types_info {
	const char *name;
	int heap_type;
} heap_types_info[] = {
	MAKE_HEAP_TYPE_MAPPING(SYSTEM),
	MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
	MAKE_HEAP_TYPE_MAPPING(CHUNK),
	MAKE_HEAP_TYPE_MAPPING(DMA),
	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
};

static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
					      int *heap_type)
{
	const char *name;
	int i, ret = -EINVAL;

	ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
	if (ret)
		goto out;
	for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
		if (!strcmp(heap_types_info[i].name, name)) {
			*heap_type = heap_types_info[i].heap_type;
			ret = 0;
			goto out;
		}
	}
	WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
	     name, __FILE__);
out:
	return ret;
}

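/*
 * Illustrative device-tree fragment matched by the lookup above; the node
 * name, heap id, and memory region are made-up examples, and the type
 * string must match a MAKE_HEAP_TYPE_MAPPING() entry:
 *
 *	qcom,ion-heap@8 {
 *		reg = <8>;
 *		memory-region = <&adsp_region>;
 *		qcom,ion-heap-type = "DMA";
 *	};
 */
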
static int msm_ion_populate_heap(struct device_node *node,
				 struct ion_platform_heap *heap)
{
	unsigned int i;
	int ret = -EINVAL, heap_type = -1;
	unsigned int len = ARRAY_SIZE(ion_heap_meta);

	for (i = 0; i < len; ++i) {
		if (ion_heap_meta[i].id == heap->id) {
			heap->name = ion_heap_meta[i].name;
			ret = msm_ion_get_heap_type_from_dt_node(node,
								 &heap_type);
			if (ret)
				break;
			heap->type = heap_type;
			ret = msm_init_extra_data(node, heap,
						  &ion_heap_meta[i]);
			break;
		}
	}
	if (ret)
		pr_err("%s: Unable to populate heap, error: %d\n",
		       __func__, ret);
	return ret;
}

static void free_pdata(const struct ion_platform_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->nr; ++i)
		kfree(pdata->heaps[i].extra_data);
	kfree(pdata->heaps);
	kfree(pdata);
}

static void msm_ion_get_heap_dt_data(struct device_node *node,
				     struct ion_platform_heap *heap)
{
	struct device_node *pnode;

	pnode = of_parse_phandle(node, "memory-region", 0);
	if (pnode) {
		const __be32 *basep;
		u64 size;
		u64 base;

		basep = of_get_address(pnode, 0, &size, NULL);
		if (!basep) {
			base = cma_get_base(dev_get_cma_area(heap->priv));
			size = cma_get_size(dev_get_cma_area(heap->priv));
		} else {
			base = of_translate_address(pnode, basep);
			WARN(base == OF_BAD_ADDR,
			     "Failed to parse DT node for heap %s\n",
			     heap->name);
		}
		heap->base = base;
		heap->size = size;
		of_node_put(pnode);
	}
}

static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
{
	struct ion_platform_data *pdata = NULL;
	struct ion_platform_heap *heaps = NULL;
	struct device_node *node;
	struct platform_device *new_dev = NULL;
	const struct device_node *dt_node = pdev->dev.of_node;
	const __be32 *val;
	int ret = -EINVAL;
	u32 num_heaps = 0;
	int idx = 0;

	for_each_available_child_of_node(dt_node, node)
		num_heaps++;

	if (!num_heaps)
		return ERR_PTR(-EINVAL);

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	heaps = kcalloc(num_heaps, sizeof(struct ion_platform_heap),
			GFP_KERNEL);
	if (!heaps) {
		kfree(pdata);
		return ERR_PTR(-ENOMEM);
	}

	pdata->heaps = heaps;
	pdata->nr = num_heaps;

	for_each_available_child_of_node(dt_node, node) {
		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
		if (!new_dev) {
			pr_err("Failed to create device %s\n", node->name);
			goto free_heaps;
		}

		pdata->heaps[idx].priv = &new_dev->dev;
		val = of_get_address(node, 0, NULL, NULL);
		if (!val) {
			pr_err("%s: Unable to find reg key\n", __func__);
			goto free_heaps;
		}
		pdata->heaps[idx].id = (u32)of_read_number(val, 1);

		ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
		if (ret)
			goto free_heaps;

		msm_ion_get_heap_dt_data(node, &pdata->heaps[idx]);

		++idx;
	}
	return pdata;

free_heaps:
	free_pdata(pdata);
	return ERR_PTR(ret);
}
#else
static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
{
	return NULL;
}

static void free_pdata(const struct ion_platform_data *pdata)
{
}
#endif

static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out;
		if (end > vma->vm_end)
			goto out;
		ret = 0;
	}

out:
	return ret;
}

int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
{
	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
}

int ion_heap_allow_heap_secure(enum ion_heap_type type)
{
	return false;
}

bool is_secure_vmid_valid(int vmid)
{
	return (vmid == VMID_CP_TOUCH ||
		vmid == VMID_CP_BITSTREAM ||
		vmid == VMID_CP_PIXEL ||
		vmid == VMID_CP_NON_PIXEL ||
		vmid == VMID_CP_CAMERA ||
		vmid == VMID_CP_SEC_DISPLAY ||
		vmid == VMID_CP_APP ||
		vmid == VMID_CP_CAMERA_PREVIEW ||
		vmid == VMID_CP_SPSS_SP ||
		vmid == VMID_CP_SPSS_SP_SHARED ||
		vmid == VMID_CP_SPSS_HLOS_SHARED);
}

unsigned int count_set_bits(unsigned long val)
{
	return ((unsigned int)bitmap_weight(&val, BITS_PER_LONG));
}

int populate_vm_list(unsigned long flags, unsigned int *vm_list,
		     int nelems)
{
	unsigned int itr = 0;
	int vmid;

	flags = flags & ION_FLAGS_CP_MASK;
	for_each_set_bit(itr, &flags, BITS_PER_LONG) {
		vmid = get_vmid(0x1UL << itr);
		if (vmid < 0 || !nelems)
			return -EINVAL;

		vm_list[nelems - 1] = vmid;
		nelems--;
	}
	return 0;
}

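/*
 * Illustrative pairing of the two helpers above (the flag combination is
 * hypothetical): size the destination from the CP-flag population count,
 * then let populate_vm_list() fill it from the highest-indexed slot down.
 *
 *	unsigned long flags = ION_FLAG_CP_PIXEL | ION_FLAG_CP_SEC_DISPLAY;
 *	unsigned int vm_list[BITS_PER_LONG];
 *	int nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
 *
 *	if (populate_vm_list(flags, vm_list, nelems))
 *		return -EINVAL;
 */
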
int get_secure_vmid(unsigned long flags)
{
	if (flags & ION_FLAG_CP_TOUCH)
		return VMID_CP_TOUCH;
	if (flags & ION_FLAG_CP_BITSTREAM)
		return VMID_CP_BITSTREAM;
	if (flags & ION_FLAG_CP_PIXEL)
		return VMID_CP_PIXEL;
	if (flags & ION_FLAG_CP_NON_PIXEL)
		return VMID_CP_NON_PIXEL;
	if (flags & ION_FLAG_CP_CAMERA)
		return VMID_CP_CAMERA;
	if (flags & ION_FLAG_CP_SEC_DISPLAY)
		return VMID_CP_SEC_DISPLAY;
	if (flags & ION_FLAG_CP_APP)
		return VMID_CP_APP;
	if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
		return VMID_CP_CAMERA_PREVIEW;
	if (flags & ION_FLAG_CP_SPSS_SP)
		return VMID_CP_SPSS_SP;
	if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
		return VMID_CP_SPSS_SP_SHARED;
	if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
		return VMID_CP_SPSS_HLOS_SHARED;
	return -EINVAL;
}

bool is_buffer_hlos_assigned(struct ion_buffer *buffer)
{
	bool is_hlos = false;

	if (buffer->heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA &&
	    (buffer->flags & ION_FLAG_CP_HLOS))
		is_hlos = true;

	if (get_secure_vmid(buffer->flags) <= 0)
		is_hlos = true;

	return is_hlos;
}

int get_vmid(unsigned long flags)
{
	int vmid;

	vmid = get_secure_vmid(flags);
	if (vmid < 0) {
		if (flags & ION_FLAG_CP_HLOS)
			vmid = VMID_HLOS;
	}
	return vmid;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	case ION_IOC_PREFETCH:
	case ION_IOC_DRAIN:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

long msm_ion_custom_ioctl(struct ion_client *client,
			  unsigned int cmd,
			  unsigned long arg)
{
	unsigned int dir;
	union {
		struct ion_flush_data flush_data;
		struct ion_prefetch_data prefetch_data;
	} data;

	dir = msm_ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		unsigned long start, end;
		struct ion_handle *handle = NULL;
		int ret;
		struct mm_struct *mm = current->active_mm;

		if (data.flush_data.handle > 0) {
			handle = ion_handle_get_by_id(
					client, (int)data.flush_data.handle);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not find handle: %d\n",
					__func__, (int)data.flush_data.handle);
				return PTR_ERR(handle);
			}
		} else {
			handle = ion_import_dma_buf_fd(client,
						       data.flush_data.fd);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not import handle: %pK\n",
					__func__, handle);
				return -EINVAL;
			}
		}

		down_read(&mm->mmap_sem);

		start = (unsigned long)data.flush_data.vaddr +
			data.flush_data.offset;
		end = start + data.flush_data.length;

		if (check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %pK is out of bounds\n",
			       __func__, data.flush_data.vaddr);
			ret = -EINVAL;
		} else {
			ret = ion_do_cache_op(
				client, handle, data.flush_data.vaddr,
				data.flush_data.offset,
				data.flush_data.length, cmd);
		}
		up_read(&mm->mmap_sem);

		ion_free(client, handle);

		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_PREFETCH:
	{
		int ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SYSTEM_SECURE,
				     (void *)&data.prefetch_data,
				     ion_system_secure_heap_prefetch);
		if (ret)
			return ret;
		break;
	}
	case ION_IOC_DRAIN:
	{
		int ret;

		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
				     ION_HEAP_TYPE_SYSTEM_SECURE,
				     (void *)&data.prefetch_data,
				     ion_system_secure_heap_drain);
		if (ret)
			return ret;
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

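/*
 * Illustrative user-space invocation of the flush path above (the fd names
 * and length are hypothetical); the command is issued on the Ion device fd
 * and dispatched to this handler via the driver's custom-ioctl hook:
 *
 *	struct ion_flush_data flush = {
 *		.fd = buf_fd,
 *		.vaddr = mapped_base,
 *		.offset = 0,
 *		.length = buf_len,
 *	};
 *
 *	if (ioctl(ion_fd, ION_IOC_CLEAN_CACHES, &flush))
 *		perror("ION_IOC_CLEAN_CACHES");
 */
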
#define MAX_VMAP_RETRIES 10

/**
 * An optimized page-zeroing function. vmaps arrays of pages in large
 * chunks to minimize the number of memsets and vmaps/vunmaps.
 *
 * Note that the `pages' array should be composed of all 4K pages.
 *
 * NOTE: This function does not guarantee synchronization of the caches
 * and thus the caller is responsible for handling any cache maintenance
 * operations needed.
 */
int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
{
	int i, j, npages_to_vmap;
	void *ptr = NULL;

	/*
	 * As an optimization, we manually zero out all of the pages
	 * in one fell swoop here. To safeguard against insufficient
	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
	 * starting with a conservative estimate of 1/8 of the total
	 * number of vmalloc pages available.
	 */
	npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
			>> PAGE_SHIFT;
	for (i = 0; i < num_pages; i += npages_to_vmap) {
		npages_to_vmap = min(npages_to_vmap, num_pages - i);
		for (j = 0; !ptr && j < MAX_VMAP_RETRIES && npages_to_vmap;
		     ++j) {
			ptr = vmap(&pages[i], npages_to_vmap,
				   VM_IOREMAP, PAGE_KERNEL);
			if (!ptr)
				npages_to_vmap >>= 1;
		}
		if (!ptr)
			return -ENOMEM;

		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
		vunmap(ptr);
		/* reset so the next chunk gets a fresh mapping */
		ptr = NULL;
	}

	return 0;
}

int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
{
	struct page **pages;
	unsigned int page_tbl_size;

	pages_mem->free_fn = kfree;
	page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
	if (page_tbl_size > SZ_8K) {
		/*
		 * Do fallback to ensure we have a balance between
		 * performance and availability.
		 */
		pages = kmalloc(page_tbl_size,
				GFP_KERNEL | __GFP_COMP |
				__GFP_NORETRY | __GFP_NOWARN);
		if (!pages) {
			pages = vmalloc(page_tbl_size);
			pages_mem->free_fn = vfree;
		}
	} else {
		pages = kmalloc(page_tbl_size, GFP_KERNEL);
	}

	if (!pages)
		return -ENOMEM;

	pages_mem->pages = pages;
	return 0;
}

void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
{
	pages_mem->free_fn(pages_mem->pages);
}

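/*
 * Illustrative flow tying the helpers above together (the order value is a
 * made-up example); this mirrors what msm_ion_heap_high_order_page_zero()
 * below does for a single high-order page:
 *
 *	struct pages_mem mem = { .size = (1 << order) * PAGE_SIZE };
 *	int i, ret;
 *
 *	if (msm_ion_heap_alloc_pages_mem(&mem))
 *		return -ENOMEM;
 *	for (i = 0; i < (1 << order); i++)
 *		mem.pages[i] = page + i;
 *	ret = msm_ion_heap_pages_zero(mem.pages, 1 << order);
 *	msm_ion_heap_free_pages_mem(&mem);
 */
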
int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
				      int order)
{
	int i, ret;
	struct pages_mem pages_mem;
	int npages = 1 << order;

	pages_mem.size = npages * PAGE_SIZE;

	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
		return -ENOMEM;

	for (i = 0; i < (1 << order); ++i)
		pages_mem.pages[i] = page + i;

	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
	dma_sync_single_for_device(dev, page_to_phys(page), pages_mem.size,
				   DMA_BIDIRECTIONAL);
	msm_ion_heap_free_pages_mem(&pages_mem);
	return ret;
}

int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *table,
			       size_t size)
{
	struct scatterlist *sg;
	int i, j, ret = 0, npages = 0;
	struct pages_mem pages_mem;

	pages_mem.size = PAGE_ALIGN(size);

	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg->length;

		/* needed to make dma_sync_sg_for_device work: */
		sg->dma_address = sg_phys(sg);

		for (j = 0; j < len / PAGE_SIZE; j++)
			pages_mem.pages[npages++] = page + j;
	}

	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
	dma_sync_sg_for_device(dev, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);
	msm_ion_heap_free_pages_mem(&pages_mem);
	return ret;
}

static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch ((int)heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_SECURE:
		heap = ion_system_secure_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_HYP_CMA:
		heap = ion_cma_secure_heap_create(heap_data);
		break;
	default:
		heap = ion_heap_create(heap_data);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       &heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	heap->priv = heap_data->priv;
	return heap;
}

static void msm_ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch ((int)heap->type) {
	case ION_HEAP_TYPE_SYSTEM_SECURE:
		ion_system_secure_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_HYP_CMA:
		ion_cma_secure_heap_destroy(heap);
		break;
	default:
		ion_heap_destroy(heap);
	}
}

struct ion_heap *get_ion_heap(int heap_id)
{
	int i;
	struct ion_heap *heap;

	for (i = 0; i < num_heaps; i++) {
		heap = heaps[i];
		if (heap->id == heap_id)
			return heap;
	}

	pr_err("%s: heap_id %d not found\n", __func__, heap_id);
	return NULL;
}

static int msm_ion_probe(struct platform_device *pdev)
{
	static struct ion_device *new_dev;
	struct ion_platform_data *pdata;
	unsigned int pdata_needs_to_be_freed;
	int err = -1;
	int i;

	if (pdev->dev.of_node) {
		pdata = msm_ion_parse_dt(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		pdata_needs_to_be_freed = 1;
	} else {
		pdata = pdev->dev.platform_data;
		pdata_needs_to_be_freed = 0;
	}

	num_heaps = pdata->nr;

	heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);

	if (!heaps) {
		err = -ENOMEM;
		goto out;
	}

	new_dev = ion_device_create(compat_msm_ion_ioctl);
	if (IS_ERR_OR_NULL(new_dev)) {
		/*
		 * set this to the ERR to indicate to the clients
		 * that Ion failed to probe.
		 */
		idev = new_dev;
		err = PTR_ERR(new_dev);
		goto out;
	}

	/* create the heaps as specified in the board file */
	for (i = 0; i < num_heaps; i++) {
		struct ion_platform_heap *heap_data = &pdata->heaps[i];

		msm_ion_allocate(heap_data);

		heap_data->has_outer_cache = pdata->has_outer_cache;
		heaps[i] = msm_ion_heap_create(heap_data);
		if (IS_ERR_OR_NULL(heaps[i])) {
			heaps[i] = NULL;
			continue;
		}

		if (heap_data->size)
			pr_info("ION heap %s created at %pa with size %zx\n",
				heap_data->name,
				&heap_data->base,
				heap_data->size);
		else
			pr_info("ION heap %s created\n", heap_data->name);

		ion_device_add_heap(new_dev, heaps[i]);
	}
	if (pdata_needs_to_be_freed)
		free_pdata(pdata);

	platform_set_drvdata(pdev, new_dev);
	/*
	 * intentionally set this at the very end to allow probes to be
	 * deferred completely until Ion is set up
	 */
	idev = new_dev;

	show_mem_notifier_register(&msm_ion_nb);
	return 0;

out:
	kfree(heaps);
	if (pdata_needs_to_be_freed)
		free_pdata(pdata);
	return err;
}

static int msm_ion_remove(struct platform_device *pdev)
{
	struct ion_device *idev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < num_heaps; i++)
		msm_ion_heap_destroy(heaps[i]);

	ion_device_destroy(idev);
	kfree(heaps);
	return 0;
}

static const struct of_device_id msm_ion_match_table[] = {
	{ .compatible = ION_COMPAT_STR },
	{},
};

static struct platform_driver msm_ion_driver = {
	.probe = msm_ion_probe,
	.remove = msm_ion_remove,
	.driver = {
		.name = "ion-msm",
		.of_match_table = msm_ion_match_table,
	},
};

static int __init msm_ion_init(void)
{
	return platform_driver_register(&msm_ion_driver);
}

static void __exit msm_ion_exit(void)
{
	platform_driver_unregister(&msm_ion_driver);
}

subsys_initcall(msm_ion_init);
module_exit(msm_ion_exit);