/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/ratelimit.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#include "kgsl_log.h"
#include "kgsl_mmu.h"
#include "kgsl_pool.h"

/*
 * The user can set this from debugfs to force memory allocations to fail
 * without trying the OOM killer first. This is a debug setting useful for
 * stress applications that want to test failure cases without pushing the
 * system into unrecoverable OOM panics.
 */

static bool sharedmem_noretry_flag;

static DEFINE_MUTEX(kernel_map_global_lock);

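/*
 * Request layout handed to the secure environment by scm_lock_chunk() when
 * locking or unlocking a physically contiguous secure buffer.
 */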
struct cp2_mem_chunks {
	unsigned int chunk_list;
	unsigned int chunk_list_size;
	unsigned int chunk_size;
} __attribute__ ((__packed__));

struct cp2_lock_req {
	struct cp2_mem_chunks chunks;
	unsigned int mem_usage;
	unsigned int lock;
} __attribute__ ((__packed__));

#define MEM_PROTECT_LOCK_ID2		0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT	0x11

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
			int type, char *buf);
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and the
 * following macro simplify the definitions needed when adding new memory
 * types.
 */

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};


#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}

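/*
 * Hypothetical example: exposing statistics for a new memory type only needs
 * one more MEM_ENTRY_STAT() entry in mem_stats[] below, e.g.
 *
 *	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_NEW, new),
 *
 * which creates both a "new" and a "new_max" sysfs file per process.
 */
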
static void kgsl_cma_unlock_secure(struct kgsl_memdesc *memdesc);

/**
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", priv->stats[type].cur);
}

/**
 * Show the maximum memory allocated for the given memtype through the life of
 * the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", priv->stats[type].max);
}

static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	/*
	 * 1. sysfs_remove_file waits for reads to complete before the node
	 *    is deleted.
	 * 2. kgsl_process_init_sysfs takes a refcount to the process_private,
	 *    which is put at the end of kgsl_process_uninit_sysfs.
	 * These two conditions imply that priv will not be freed until this
	 * function completes, and no further locking is needed.
	 */
	priv = kobj ? container_of(kobj, struct kgsl_process_private, kobj) :
			NULL;

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
};

static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

	kobject_put(&private->kobj);
	/* Put the refcount we got in kgsl_process_init_sysfs */
	kgsl_process_private_put(private);
}

/**
 * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
 *
 * @device: Pointer to kgsl device struct
 * @private: Pointer to the structure for the process
 *
 * kgsl_process_init_sysfs() is called at the time of creating the
 * process struct when a process opens the kgsl device for the first time.
 * This function creates the sysfs files for the process.
 */
void kgsl_process_init_sysfs(struct kgsl_device *device,
		struct kgsl_process_private *private)
{
	unsigned char name[16];
	int i;

	/* Keep private valid until the sysfs entries are removed. */
	kgsl_process_private_get(private);

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name)) {
		WARN(1, "Unable to add sysfs dir '%s'\n", name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		if (sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr))
			WARN(1, "Couldn't create sysfs file '%s'\n",
				mem_stats[i].attr.attr.name);

		if (sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr))
			WARN(1, "Couldn't create sysfs file '%s'\n",
				mem_stats[i].max_attr.attr.name);
	}
}

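/* Show one of the driver-wide memory statistics, selected by attribute name */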
static ssize_t kgsl_drv_memstat_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	uint64_t val = 0;

	if (!strcmp(attr->attr.name, "vmalloc"))
		val = atomic_long_read(&kgsl_driver.stats.vmalloc);
	else if (!strcmp(attr->attr.name, "vmalloc_max"))
		val = atomic_long_read(&kgsl_driver.stats.vmalloc_max);
	else if (!strcmp(attr->attr.name, "page_alloc"))
		val = atomic_long_read(&kgsl_driver.stats.page_alloc);
	else if (!strcmp(attr->attr.name, "page_alloc_max"))
		val = atomic_long_read(&kgsl_driver.stats.page_alloc_max);
	else if (!strcmp(attr->attr.name, "coherent"))
		val = atomic_long_read(&kgsl_driver.stats.coherent);
	else if (!strcmp(attr->attr.name, "coherent_max"))
		val = atomic_long_read(&kgsl_driver.stats.coherent_max);
	else if (!strcmp(attr->attr.name, "secure"))
		val = atomic_long_read(&kgsl_driver.stats.secure);
	else if (!strcmp(attr->attr.name, "secure_max"))
		val = atomic_long_read(&kgsl_driver.stats.secure_max);
	else if (!strcmp(attr->attr.name, "mapped"))
		val = atomic_long_read(&kgsl_driver.stats.mapped);
	else if (!strcmp(attr->attr.name, "mapped_max"))
		val = atomic_long_read(&kgsl_driver.stats.mapped_max);

	return snprintf(buf, PAGE_SIZE, "%llu\n", val);
}

static ssize_t kgsl_drv_full_cache_threshold_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned int thresh = 0;

	ret = kgsl_sysfs_store(buf, &thresh);
	if (ret)
		return ret;

	kgsl_driver.full_cache_threshold = thresh;
	return count;
}

static ssize_t kgsl_drv_full_cache_threshold_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			kgsl_driver.full_cache_threshold);
}

static DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(secure, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(secure_max, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
static DEVICE_ATTR(full_cache_threshold, 0644,
		kgsl_drv_full_cache_threshold_show,
		kgsl_drv_full_cache_threshold_store);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_page_alloc,
	&dev_attr_page_alloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_secure,
	&dev_attr_secure_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_full_cache_threshold,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

static int kgsl_cma_alloc_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, uint64_t size);

static int kgsl_allocate_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc,
			uint64_t size)
{
	int ret;

	if (MMU_FEATURE(&device->mmu, KGSL_MMU_HYP_SECURE_ALLOC))
		ret = kgsl_sharedmem_page_alloc_user(memdesc, size);
	else
		ret = kgsl_cma_alloc_secure(device, memdesc, size);

	return ret;
}

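/*
 * Select the backing store for a user allocation: contiguous memory when no
 * MMU is present, the secure path for KGSL_MEMFLAGS_SECURE buffers, and
 * pooled page allocations otherwise.
 */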
int kgsl_allocate_user(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc,
		uint64_t size, uint64_t flags)
{
	int ret;

	memdesc->flags = flags;

	if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
		ret = kgsl_sharedmem_alloc_contig(device, memdesc, size);
	else if (flags & KGSL_MEMFLAGS_SECURE)
		ret = kgsl_allocate_secure(device, memdesc, size);
	else
		ret = kgsl_sharedmem_page_alloc_user(memdesc, size);

	return ret;
}

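/*
 * Fault handler for page-allocated buffers: look up the faulting page in the
 * pages array and hand it back to the VM.
 */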
static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int pgoff;
	unsigned int offset;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start);

	if (offset >= memdesc->size)
		return VM_FAULT_SIGBUS;

	pgoff = offset >> PAGE_SHIFT;

	if (pgoff < memdesc->page_count) {
		struct page *page = memdesc->pages[pgoff];

		get_page(page);
		vmf->page = page;

		memdesc->mapsize += PAGE_SIZE;

		return 0;
	}

	return VM_FAULT_SIGBUS;
}

/*
 * kgsl_page_alloc_unmap_kernel() - Unmap the memory in memdesc
 *
 * @memdesc: The memory descriptor which contains information about the memory
 *
 * Unmaps the memory mapped into kernel address space
 */
static void kgsl_page_alloc_unmap_kernel(struct kgsl_memdesc *memdesc)
{
	mutex_lock(&kernel_map_global_lock);
	if (!memdesc->hostptr) {
		/* If already unmapped the refcount should be 0 */
		WARN_ON(memdesc->hostptr_count);
		goto done;
	}
	memdesc->hostptr_count--;
	if (memdesc->hostptr_count)
		goto done;
	vunmap(memdesc->hostptr);

	atomic_long_sub(memdesc->size, &kgsl_driver.stats.vmalloc);
	memdesc->hostptr = NULL;
done:
	mutex_unlock(&kernel_map_global_lock);
}

static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
	kgsl_page_alloc_unmap_kernel(memdesc);
	/* we certainly do not expect the hostptr to still be mapped */
	BUG_ON(memdesc->hostptr);

	/* Secure buffers need to be unlocked before being freed */
	if (memdesc->priv & KGSL_MEMDESC_TZ_LOCKED) {
		int ret;
		int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
		int source_vm = VMID_CP_PIXEL;
		int dest_vm = VMID_HLOS;

		ret = hyp_assign_table(memdesc->sgt, &source_vm, 1,
					&dest_vm, &dest_perms, 1);
		if (ret) {
			pr_err("Secure buf unlock failed: gpuaddr: %llx size: %llx ret: %d\n",
					memdesc->gpuaddr, memdesc->size, ret);
			BUG();
		}

		atomic_long_sub(memdesc->size, &kgsl_driver.stats.secure);
	} else {
		atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
	}

	if (memdesc->priv & KGSL_MEMDESC_TZ_LOCKED) {
		struct sg_page_iter sg_iter;

		for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
				memdesc->sgt->nents, 0)
			ClearPagePrivate(sg_page_iter_page(&sg_iter));
	}

	/* Free pages using the pages array for non secure paged memory */
	if (memdesc->pages != NULL)
		kgsl_pool_free_pages(memdesc->pages, memdesc->page_count);
	else
		kgsl_pool_free_sgt(memdesc->sgt);
}

/*
 * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
 * space
 *
 * @memdesc - The memory descriptor which contains information about the memory
 *
 * Return: 0 on success else error code
 */
static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
	int ret = 0;

	/* Sanity check - don't map more than we could possibly chew */
	if (memdesc->size > ULONG_MAX)
		return -ENOMEM;

	mutex_lock(&kernel_map_global_lock);
	if ((!memdesc->hostptr) && (memdesc->pages != NULL)) {
		pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);

		memdesc->hostptr = vmap(memdesc->pages, memdesc->page_count,
					VM_IOREMAP, page_prot);
		if (memdesc->hostptr)
			KGSL_STATS_ADD(memdesc->size,
				&kgsl_driver.stats.vmalloc,
				&kgsl_driver.stats.vmalloc_max);
		else
			ret = -ENOMEM;
	}
	if (memdesc->hostptr)
		memdesc->hostptr_count++;

	mutex_unlock(&kernel_map_global_lock);

	return ret;
}

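/*
 * A minimal usage sketch for the map/unmap pair above (hypothetical caller).
 * The kernel mapping is refcounted, so every successful map_kernel() must be
 * balanced by one unmap_kernel():
 *
 *	if (!memdesc->ops->map_kernel(memdesc)) {
 *		memcpy(memdesc->hostptr, data, len);
 *		memdesc->ops->unmap_kernel(memdesc);
 *	}
 */
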
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	memdesc->mapsize += PAGE_SIZE;

	return VM_FAULT_NOPAGE;
}

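/*
 * Free a coherent/CMA allocation, first unlocking it from the secure
 * environment if it was a secure buffer.
 */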
static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
{
	unsigned long attrs = 0;

	if (memdesc->hostptr) {
		if (memdesc->priv & KGSL_MEMDESC_SECURE) {
			atomic_long_sub(memdesc->size,
				&kgsl_driver.stats.secure);

			kgsl_cma_unlock_secure(memdesc);
			attrs = memdesc->attrs;
		} else
			atomic_long_sub(memdesc->size,
				&kgsl_driver.stats.coherent);

		dma_free_attrs(memdesc->dev, (size_t) memdesc->size,
			memdesc->hostptr, memdesc->physaddr, attrs);
	}
}

/* Ops for buffers backed by pooled pages */
static struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
	.free = kgsl_page_alloc_free,
	.vmflags = VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY,
	.vmfault = kgsl_page_alloc_vmfault,
	.map_kernel = kgsl_page_alloc_map_kernel,
	.unmap_kernel = kgsl_page_alloc_unmap_kernel,
};

/* CMA ops - used for NOMMU mode and secure CMA buffers */
static struct kgsl_memdesc_ops kgsl_cma_ops = {
	.free = kgsl_cma_coherent_free,
	.vmflags = VM_DONTDUMP | VM_PFNMAP | VM_DONTEXPAND | VM_DONTCOPY,
	.vmfault = kgsl_contiguous_vmfault,
};

#ifdef CONFIG_ARM64
/*
 * For security reasons, ARMv8 doesn't allow an invalidate-only operation on
 * read-only mappings, and reading back the buffer permissions before every
 * operation would be prohibitively expensive. No use case we have found
 * relies on an invalidate being invalidate-only, so we are comfortable
 * turning invalidates into flushes on these targets.
 */
static inline unsigned int _fixup_cache_range_op(unsigned int op)
{
	if (op == KGSL_CACHE_OP_INV)
		return KGSL_CACHE_OP_FLUSH;
	return op;
}
#else
static inline unsigned int _fixup_cache_range_op(unsigned int op)
{
	return op;
}
#endif

static inline void _cache_op(unsigned int op,
	const void *start, const void *end)
{
	/*
	 * The dmac_xxx_range functions correctly handle addresses and sizes
	 * that are not aligned to the cacheline size.
	 */
	switch (_fixup_cache_range_op(op)) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(start, end);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(start, end);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(start, end);
		break;
	}
}

static int kgsl_do_cache_op(struct page *page, void *addr,
		uint64_t offset, uint64_t size, unsigned int op)
{
	if (page != NULL) {
		unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
		/*
		 * page_address() returns the kernel virtual address of page.
		 * For high memory a kernel virtual address exists only if the
		 * page has been mapped, so use a kmap variant rather than
		 * page_address() for high memory.
		 */
		if (PageHighMem(page)) {
			offset &= ~PAGE_MASK;

			do {
				unsigned int len = size;

				if (len + offset > PAGE_SIZE)
					len = PAGE_SIZE - offset;

				page = pfn_to_page(pfn++);
				addr = kmap_atomic(page);
				_cache_op(op, addr + offset,
					addr + offset + len);
				kunmap_atomic(addr);

				size -= len;
				offset = 0;
			} while (size);

			return 0;
		}

		addr = page_address(page);
	}

	_cache_op(op, addr + offset, addr + offset + (size_t) size);
	return 0;
}

int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
		uint64_t size, unsigned int op)
{
	void *addr = NULL;
	struct sg_table *sgt = NULL;
	struct scatterlist *sg;
	unsigned int i, pos = 0;
	int ret = 0;

	if (size == 0 || size > UINT_MAX)
		return -EINVAL;

	/* Make sure that the offset + size does not overflow */
	if ((offset + size < offset) || (offset + size < size))
		return -ERANGE;

	/* Check that offset+length does not exceed memdesc->size */
	if (offset + size > memdesc->size)
		return -ERANGE;

	if (memdesc->hostptr) {
		addr = memdesc->hostptr;
		/* Make sure the offset + size do not overflow the address */
		if (addr + ((size_t) offset + (size_t) size) < addr)
			return -ERANGE;

		ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
		return ret;
	}

	/*
	 * If the buffer is not mapped into the kernel, perform the cache
	 * operations page by page through the scatterlist, building a
	 * temporary sg table from the pages array if needed.
	 */
	if (memdesc->sgt != NULL)
		sgt = memdesc->sgt;
	else {
		if (memdesc->pages == NULL)
			return ret;

		sgt = kgsl_alloc_sgt_from_pages(memdesc);
		if (IS_ERR(sgt))
			return PTR_ERR(sgt);
	}

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		uint64_t sg_offset, sg_left;

		if (offset >= (pos + sg->length)) {
			pos += sg->length;
			continue;
		}
		sg_offset = offset > pos ? offset - pos : 0;
		sg_left = (sg->length - sg_offset > size) ? size :
			sg->length - sg_offset;
		ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
							sg_left, op);
		size -= sg_left;
		if (size == 0)
			break;
		pos += sg->length;
	}

	if (memdesc->sgt == NULL)
		kgsl_free_sgt(sgt);

	return ret;
}
EXPORT_SYMBOL(kgsl_cache_range_op);

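/*
 * Hypothetical example: flushing the first 4K of a buffer after a CPU write
 * so the GPU observes the data:
 *
 *	ret = kgsl_cache_range_op(memdesc, 0, SZ_4K, KGSL_CACHE_OP_FLUSH);
 */

/*
 * Allocate a buffer for a user process from the page pools and, for secure
 * buffers, assign the pages to the secure VM via the hypervisor.
 */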
int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			uint64_t size)
{
	int ret = 0;
	unsigned int j, page_size, len_alloc;
	unsigned int pcount = 0;
	size_t len;
	unsigned int align;

	static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	size = PAGE_ALIGN(size);
	if (size == 0 || size > UINT_MAX)
		return -EINVAL;

	align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;

	page_size = kgsl_get_page_size(size, align);

	/*
	 * The alignment cannot be less than the intended page size - it can be
	 * larger however to accommodate hardware quirks
	 */
	if (align < ilog2(page_size)) {
		kgsl_memdesc_set_align(memdesc, ilog2(page_size));
		align = ilog2(page_size);
	}

	/*
	 * There needs to be enough room in the page array to be able to
	 * service the allocation entirely with PAGE_SIZE sized chunks
	 */
	len_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;

	memdesc->ops = &kgsl_page_alloc_ops;

	/*
	 * Allocate space to store the list of pages. This is an array of
	 * pointers so we can track 1024 pages per page of allocation.
	 * Keep this array around for non-global, non-secure buffers allocated
	 * by kgsl; it lets the vm fault handler find the faulting page in
	 * constant time.
	 */
	memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
	memdesc->page_count = 0;
	memdesc->size = 0;

	if (memdesc->pages == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	len = size;

	while (len > 0) {
		int page_count;

		page_count = kgsl_pool_alloc_page(&page_size,
					memdesc->pages + pcount,
					len_alloc - pcount,
					&align);
		if (page_count <= 0) {
			if (page_count == -EAGAIN)
				continue;

			/*
			 * Update the memdesc size to reflect only what was
			 * actually allocated, so the pages can be correctly
			 * freed in kgsl_sharedmem_free().
			 */
			memdesc->size = (size - len);

			if (sharedmem_noretry_flag != true &&
					__ratelimit(&_rs))
				KGSL_CORE_ERR(
					"Out of memory: only allocated %lldKB of %lldKB requested\n",
					(size - len) >> 10, size >> 10);

			ret = -ENOMEM;
			goto done;
		}

		pcount += page_count;
		len -= page_size;
		memdesc->size += page_size;
		memdesc->page_count += page_count;

		/* Get the needed page size for the next iteration */
		page_size = kgsl_get_page_size(len, align);
	}

	/* Call to the hypervisor to lock any secure buffer allocations */
	if (memdesc->flags & KGSL_MEMFLAGS_SECURE) {
		unsigned int i;
		struct scatterlist *sg;
		int dest_perms = PERM_READ | PERM_WRITE;
		int source_vm = VMID_HLOS;
		int dest_vm = VMID_CP_PIXEL;

		memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (memdesc->sgt == NULL) {
			ret = -ENOMEM;
			goto done;
		}

		ret = sg_alloc_table_from_pages(memdesc->sgt, memdesc->pages,
			memdesc->page_count, 0, memdesc->size, GFP_KERNEL);
		if (ret) {
			kfree(memdesc->sgt);
			goto done;
		}

		ret = hyp_assign_table(memdesc->sgt, &source_vm, 1,
					&dest_vm, &dest_perms, 1);
		if (ret) {
			sg_free_table(memdesc->sgt);
			kfree(memdesc->sgt);
			memdesc->sgt = NULL;
			goto done;
		}

		/* Set the private bit on each sg page to mark it secured */
		for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i)
			SetPagePrivate(sg_page(sg));

		memdesc->priv |= KGSL_MEMDESC_TZ_LOCKED;

		/* Record statistics */
		KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.secure,
			&kgsl_driver.stats.secure_max);

		/*
		 * We don't need the array for secure buffers because they are
		 * not mapped to the CPU
		 */
		kgsl_free(memdesc->pages);
		memdesc->pages = NULL;
		memdesc->page_count = 0;

		/* Don't map and zero the locked secure buffer */
		goto done;
	}

	KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.page_alloc,
		&kgsl_driver.stats.page_alloc_max);

done:
	if (ret) {
		if (memdesc->pages) {
			unsigned int count = 1;

			for (j = 0; j < pcount; j += count) {
				count = 1 << compound_order(memdesc->pages[j]);
				kgsl_pool_free_page(memdesc->pages[j]);
			}
		}

		kgsl_free(memdesc->pages);
		memset(memdesc, 0, sizeof(*memdesc));
	}

	return ret;
}

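/*
 * Release everything backing @memdesc: its GPU mapping, the backing pages or
 * CMA memory via ops->free, and the scatter-gather table.
 */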
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	/* Make sure the memory object has been unmapped */
	kgsl_mmu_put_gpuaddr(memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	if (memdesc->sgt) {
		sg_free_table(memdesc->sgt);
		kfree(memdesc->sgt);
	}

	if (memdesc->pages)
		kgsl_free(memdesc->pages);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			uint64_t offsetbytes)
{
	uint32_t *src;

	if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL ||
		dst == NULL))
		return -EINVAL;

	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes > (memdesc->size - sizeof(uint32_t)));
	if (offsetbytes > (memdesc->size - sizeof(uint32_t)))
		return -ERANGE;

	/*
	 * We are reading shared memory between CPU and GPU.
	 * Make sure reads before this are complete
	 */
	rmb();
	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(struct kgsl_device *device,
			const struct kgsl_memdesc *memdesc,
			uint64_t offsetbytes,
			uint32_t src)
{
	uint32_t *dst;

	if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL))
		return -EINVAL;

	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes > (memdesc->size - sizeof(uint32_t)));
	if (offsetbytes > (memdesc->size - sizeof(uint32_t)))
		return -ERANGE;
	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;

	/*
	 * We are writing to shared memory between CPU and GPU.
	 * Make sure the write above is posted immediately
	 */
	wmb();

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

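/*
 * Hypothetical example using the 32-bit accessors above on a kernel-mapped
 * buffer (offsets are in bytes and must be dword aligned):
 *
 *	kgsl_sharedmem_writel(device, memdesc, 0, 0xdeadbeef);
 *	kgsl_sharedmem_readl(memdesc, &val, 0);
 */
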
int
kgsl_sharedmem_readq(const struct kgsl_memdesc *memdesc,
			uint64_t *dst,
			uint64_t offsetbytes)
{
	uint64_t *src;

	if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL ||
		dst == NULL))
		return -EINVAL;

	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes > (memdesc->size - sizeof(uint32_t)));
	if (offsetbytes > (memdesc->size - sizeof(uint32_t)))
		return -ERANGE;

	/*
	 * We are reading shared memory between CPU and GPU.
	 * Make sure reads before this are complete
	 */
	rmb();
	src = (uint64_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readq);

int
kgsl_sharedmem_writeq(struct kgsl_device *device,
			const struct kgsl_memdesc *memdesc,
			uint64_t offsetbytes,
			uint64_t src)
{
	uint64_t *dst;

	if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL))
		return -EINVAL;

	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes > (memdesc->size - sizeof(uint32_t)));
	if (offsetbytes > (memdesc->size - sizeof(uint32_t)))
		return -ERANGE;
	dst = (uint64_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;

	/*
	 * We are writing to shared memory between CPU and GPU.
	 * Make sure the write above is posted immediately
	 */
	wmb();

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writeq);

int
kgsl_sharedmem_set(struct kgsl_device *device,
		const struct kgsl_memdesc *memdesc, uint64_t offsetbytes,
		unsigned int value, uint64_t sizebytes)
{
	if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL))
		return -EINVAL;

	if (WARN_ON(offsetbytes + sizebytes > memdesc->size))
		return -EINVAL;

	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);

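/*
 * Human-readable names for the user-specified KGSL_MEMTYPE_* values,
 * reported by kgsl_get_memory_usage() below.
 */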
static const char * const memtype_str[] = {
	[KGSL_MEMTYPE_OBJECTANY] = "any(0)",
	[KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
	[KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
	[KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
	[KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
	[KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
	[KGSL_MEMTYPE_TEXTURE] = "texture",
	[KGSL_MEMTYPE_SURFACE] = "surface",
	[KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
	[KGSL_MEMTYPE_GL] = "gl",
	[KGSL_MEMTYPE_CL] = "cl",
	[KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
	[KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
	[KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
	[KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
	[KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
	[KGSL_MEMTYPE_COMMAND] = "command",
	[KGSL_MEMTYPE_2D] = "2d",
	[KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
	[KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
	[KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
	/* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
};

void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
{
	unsigned int type = MEMFLAGS(memflags, KGSL_MEMTYPE_MASK,
		KGSL_MEMTYPE_SHIFT);

	if (type == KGSL_MEMTYPE_KERNEL)
		strlcpy(name, "kernel", name_size);
	else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
		strlcpy(name, memtype_str[type], name_size);
	else
		snprintf(name, name_size, "unknown(%3d)", type);
}
EXPORT_SYMBOL(kgsl_get_memory_usage);

int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, uint64_t size)
{
	int result = 0;

	size = PAGE_ALIGN(size);
	if (size == 0 || size > SIZE_MAX)
		return -EINVAL;

	memdesc->size = size;
	memdesc->ops = &kgsl_cma_ops;
	memdesc->dev = device->dev->parent;

	memdesc->hostptr = dma_alloc_attrs(memdesc->dev, (size_t) size,
		&memdesc->physaddr, GFP_KERNEL, 0);

	if (memdesc->hostptr == NULL) {
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_dma(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
		memdesc->gpuaddr = memdesc->physaddr;

	/* Record statistics */
	KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
		&kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_contig);

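/*
 * Ask the secure environment to lock (@lock = 1) or unlock (@lock = 0) the
 * physically contiguous buffer described by @memdesc.
 */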
static int scm_lock_chunk(struct kgsl_memdesc *memdesc, int lock)
{
	struct cp2_lock_req request;
	unsigned int resp;
	unsigned int *chunk_list;
	struct scm_desc desc = {0};
	int result;

	/*
	 * Flush the virt addr range before sending the memory to the
	 * secure environment to ensure the data is actually present
	 * in RAM.
	 *
	 * Chunk_list holds the physical address of secure memory.
	 * Pass in the virtual address of chunk_list to flush.
	 * Chunk_list size is 1 because secure memory is physically
	 * contiguous.
	 */
	chunk_list = kzalloc(sizeof(unsigned int), GFP_KERNEL);
	if (!chunk_list)
		return -ENOMEM;

	chunk_list[0] = memdesc->physaddr;
	dmac_flush_range((void *)chunk_list, (void *)chunk_list + 1);

	request.chunks.chunk_list = virt_to_phys(chunk_list);
	/*
	 * virt_to_phys(chunk_list) may be an address > 4GB. It is guaranteed
	 * that when using scm_call (the older interface), the phys addresses
	 * will be restricted to below 4GB.
	 */
	desc.args[0] = virt_to_phys(chunk_list);
	desc.args[1] = request.chunks.chunk_list_size = 1;
	desc.args[2] = request.chunks.chunk_size = (unsigned int) memdesc->size;
	desc.args[3] = request.mem_usage = 0;
	desc.args[4] = request.lock = lock;
	desc.args[5] = 0;
	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
				SCM_VAL);
	kmap_flush_unused();
	kmap_atomic_flush_unused();
	if (!is_scm_armv8()) {
		result = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
				&request, sizeof(request), &resp, sizeof(resp));
	} else {
		result = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
		resp = desc.ret[0];
	}

	kfree(chunk_list);
	return result;
}

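/*
 * Allocate a physically contiguous buffer from the secure context bank
 * device and lock it down with the secure environment.
 */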
static int kgsl_cma_alloc_secure(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, uint64_t size)
{
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	int result = 0;
	size_t aligned;

	/* Align size to 1M boundaries */
	aligned = ALIGN(size, SZ_1M);

	/* The SCM call uses an unsigned int for the size */
	if (aligned == 0 || aligned > UINT_MAX)
		return -EINVAL;

	/*
	 * If the 1M alignment leaves at least a page of gap between the
	 * requested size and the aligned size, that gap can serve as the
	 * guard page and we don't need to add more memory for it.
	 */
	if (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE)
		if (aligned - size >= SZ_4K)
			memdesc->priv &= ~KGSL_MEMDESC_GUARD_PAGE;

	memdesc->size = aligned;
	memdesc->ops = &kgsl_cma_ops;
	memdesc->dev = iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE].dev;

	memdesc->attrs |= DMA_ATTR_STRONGLY_ORDERED;

	memdesc->hostptr = dma_alloc_attrs(memdesc->dev, aligned,
		&memdesc->physaddr, GFP_KERNEL, memdesc->attrs);

	if (memdesc->hostptr == NULL) {
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_dma(memdesc, memdesc->physaddr, aligned);
	if (result)
		goto err;

	result = scm_lock_chunk(memdesc, 1);

	if (result != 0)
		goto err;

	/* Set the private bit to indicate that we've secured this */
	SetPagePrivate(sg_page(memdesc->sgt->sgl));

	memdesc->priv |= KGSL_MEMDESC_TZ_LOCKED;

	/* Record statistics */
	KGSL_STATS_ADD(aligned, &kgsl_driver.stats.secure,
		&kgsl_driver.stats.secure_max);
err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

/**
 * kgsl_cma_unlock_secure() - Unlock secure memory by calling TZ
 * @memdesc: memory descriptor
 */
static void kgsl_cma_unlock_secure(struct kgsl_memdesc *memdesc)
{
	if (memdesc->size == 0 || !(memdesc->priv & KGSL_MEMDESC_TZ_LOCKED))
		return;

	if (!scm_lock_chunk(memdesc, 0))
		ClearPagePrivate(sg_page(memdesc->sgt->sgl));
}

void kgsl_sharedmem_set_noretry(bool val)
{
	sharedmem_noretry_flag = val;
}

bool kgsl_sharedmem_get_noretry(void)
{
	return sharedmem_noretry_flag;
}