/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-buf.h>
#include <linux/pm_runtime.h>
#include <linux/rbtree.h>
#include <linux/major.h>
#include <linux/io.h>
#include <linux/mman.h>
#include <linux/sort.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
#include "kgsl_compat.h"
#include "kgsl_pool.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif

#ifndef pgprot_writebackcache
#define pgprot_writebackcache(_prot) (_prot)
#endif

#ifndef pgprot_writethroughcache
#define pgprot_writethroughcache(_prot) (_prot)
#endif

#ifdef CONFIG_ARM_LPAE
#define KGSL_DMA_BIT_MASK DMA_BIT_MASK(64)
#else
#define KGSL_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

static char *kgsl_mmu_type;
module_param_named(mmutype, kgsl_mmu_type, charp, 0000);
MODULE_PARM_DESC(mmutype, "Type of MMU to be used for graphics");

/* Mutex used for the IOMMU sync quirk */
DEFINE_MUTEX(kgsl_mmu_sync);
EXPORT_SYMBOL(kgsl_mmu_sync);

struct kgsl_dma_buf_meta {
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *table;
};

static inline struct kgsl_pagetable *_get_memdesc_pagetable(
		struct kgsl_pagetable *pt, struct kgsl_mem_entry *entry)
{
	/* if a secured buffer, map it to secure global pagetable */
	if (kgsl_memdesc_is_secured(&entry->memdesc))
		return pt->mmu->securepagetable;

	return pt;
}

static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);

static const struct file_operations kgsl_fops;

/*
 * The memfree list contains the last N blocks of memory that have been freed.
 * On a GPU fault we walk the list to see if the faulting address had been
 * recently freed and print out a message to that effect
 */

#define MEMFREE_ENTRIES 512

static DEFINE_SPINLOCK(memfree_lock);

struct memfree_entry {
	pid_t ptname;
	uint64_t gpuaddr;
	uint64_t size;
	pid_t pid;
	uint64_t flags;
};

static struct {
	struct memfree_entry *list;
	int head;
	int tail;
} memfree;

static int kgsl_memfree_init(void)
{
	memfree.list = kcalloc(MEMFREE_ENTRIES, sizeof(struct memfree_entry),
			GFP_KERNEL);

	return (memfree.list) ? 0 : -ENOMEM;
}

static void kgsl_memfree_exit(void)
{
	kfree(memfree.list);
	memset(&memfree, 0, sizeof(memfree));
}

static inline bool match_memfree_addr(struct memfree_entry *entry,
		pid_t ptname, uint64_t gpuaddr)
{
	return ((entry->ptname == ptname) &&
		(entry->size > 0) &&
		(gpuaddr >= entry->gpuaddr &&
		 gpuaddr < (entry->gpuaddr + entry->size)));
}

int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
	uint64_t *size, uint64_t *flags, pid_t *pid)
{
	int ptr;

	if (memfree.list == NULL)
		return 0;

	spin_lock(&memfree_lock);

	ptr = memfree.head - 1;
	if (ptr < 0)
		ptr = MEMFREE_ENTRIES - 1;

	/* Walk backwards through the list looking for the last match */
	while (ptr != memfree.tail) {
		struct memfree_entry *entry = &memfree.list[ptr];

		if (match_memfree_addr(entry, ptname, *gpuaddr)) {
			*gpuaddr = entry->gpuaddr;
			*flags = entry->flags;
			*size = entry->size;
			*pid = entry->pid;

			spin_unlock(&memfree_lock);
			return 1;
		}

		ptr = ptr - 1;

		if (ptr < 0)
			ptr = MEMFREE_ENTRIES - 1;
	}

	spin_unlock(&memfree_lock);
	return 0;
}

static void kgsl_memfree_purge(struct kgsl_pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	pid_t ptname = pagetable ? pagetable->name : 0;
	int i;

	if (memfree.list == NULL)
		return;

	spin_lock(&memfree_lock);

	for (i = 0; i < MEMFREE_ENTRIES; i++) {
		struct memfree_entry *entry = &memfree.list[i];

		if (entry->ptname != ptname || entry->size == 0)
			continue;

		if (gpuaddr > entry->gpuaddr &&
			gpuaddr < entry->gpuaddr + entry->size) {
			/* truncate the end of the entry */
			entry->size = gpuaddr - entry->gpuaddr;
		} else if (gpuaddr <= entry->gpuaddr) {
			if (gpuaddr + size > entry->gpuaddr &&
				gpuaddr + size < entry->gpuaddr + entry->size)
				/* Truncate the beginning of the entry */
				entry->gpuaddr = gpuaddr + size;
			else if (gpuaddr + size >= entry->gpuaddr + entry->size)
				/* Remove the entire entry */
				entry->size = 0;
		}
	}
	spin_unlock(&memfree_lock);
}

static void kgsl_memfree_add(pid_t pid, pid_t ptname, uint64_t gpuaddr,
		uint64_t size, uint64_t flags)
{
	struct memfree_entry *entry;

	if (memfree.list == NULL)
		return;

	spin_lock(&memfree_lock);

	entry = &memfree.list[memfree.head];

	entry->pid = pid;
	entry->ptname = ptname;
	entry->gpuaddr = gpuaddr;
	entry->size = size;
	entry->flags = flags;

	memfree.head = (memfree.head + 1) % MEMFREE_ENTRIES;

	if (memfree.head == memfree.tail)
		memfree.tail = (memfree.tail + 1) % MEMFREE_ENTRIES;

	spin_unlock(&memfree_lock);
}
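
/*
 * Illustrative note on the ring buffer above (values made up): with
 * MEMFREE_ENTRIES == 512, kgsl_memfree_add() always writes at 'head' and
 * advances it modulo 512; once the ring is full, 'tail' is pushed forward so
 * the oldest record is overwritten. kgsl_memfree_find_entry() walks from
 * head - 1 back toward tail, so the most recent free covering the faulting
 * address wins. For example, with head == 2 and tail == 3 the walk visits
 * entries 1, 0, 511, 510, ... 4 before giving up.
 */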

int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
		enum kgsl_timestamp_type type, unsigned int *timestamp)
{
	return device->ftbl->readtimestamp(device, priv, type, timestamp);
}
EXPORT_SYMBOL(kgsl_readtimestamp);

/* Scheduled by kgsl_mem_entry_put_deferred() */
static void _deferred_put(struct work_struct *work)
{
	struct kgsl_mem_entry *entry =
		container_of(work, struct kgsl_mem_entry, work);

	kgsl_mem_entry_put(entry);
}

static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (entry != NULL) {
		kref_init(&entry->refcount);
		/* put this ref in userspace memory alloc and map ioctls */
		kref_get(&entry->refcount);
		atomic_set(&entry->map_count, 0);
	}

	return entry;
}
#ifdef CONFIG_DMA_SHARED_BUFFER
static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
{
	if (meta != NULL) {
		dma_buf_unmap_attachment(meta->attach, meta->table,
			DMA_FROM_DEVICE);
		dma_buf_detach(meta->dmabuf, meta->attach);
		dma_buf_put(meta->dmabuf);
		kfree(meta);
	}
}
#else
static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
{

}
#endif

void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						struct kgsl_mem_entry,
						refcount);
	unsigned int memtype;

	if (entry == NULL)
		return;

	/* pull out the memtype before the flags get cleared */
	memtype = kgsl_memdesc_usermem_type(&entry->memdesc);

	/* Detach from process list */
	kgsl_mem_entry_detach_process(entry);

	if (memtype != KGSL_MEM_ENTRY_KERNEL)
		atomic_long_sub(entry->memdesc.size,
			&kgsl_driver.stats.mapped);

	/*
	 * Ion takes care of freeing the sg_table for us so
	 * clear the sg table before freeing the sharedmem
	 * so kgsl_sharedmem_free doesn't try to free it again
	 */
	if (memtype == KGSL_MEM_ENTRY_ION)
		entry->memdesc.sgt = NULL;

	if ((memtype == KGSL_MEM_ENTRY_USER)
		&& !(entry->memdesc.flags & KGSL_MEMFLAGS_GPUREADONLY)) {
		int i = 0, j;
		struct scatterlist *sg;
		struct page *page;
		/*
		 * Mark all of the pages in the scatterlist as dirty since
		 * they were writable by the GPU.
		 */
		for_each_sg(entry->memdesc.sgt->sgl, sg,
				entry->memdesc.sgt->nents, i) {
			page = sg_page(sg);
			for (j = 0; j < (sg->length >> PAGE_SHIFT); j++)
				set_page_dirty_lock(nth_page(page, j));
		}
	}

	kgsl_sharedmem_free(&entry->memdesc);

	switch (memtype) {
	case KGSL_MEM_ENTRY_ION:
		kgsl_destroy_ion(entry->priv_data);
		break;
	default:
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
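
/*
 * Editorial note on the reference counting above (a sketch, not new
 * behavior): kgsl_mem_entry_create() hands back an entry with a refcount of
 * two - one held by the allocating/mapping ioctl path and one on behalf of
 * userspace, dropped later by a free ioctl or by process teardown. When the
 * last reference goes away, kref_put() lands in kgsl_mem_entry_destroy(),
 * which detaches the entry, releases the backing memory and frees the
 * struct.
 */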

/* Allocate an IOVA for memory objects that don't use SVM */
static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
		struct kgsl_process_private *process,
		struct kgsl_mem_entry *entry)
{
	struct kgsl_pagetable *pagetable;

	/*
	 * If SVM is enabled for this object then the address needs to be
	 * assigned elsewhere
	 * Also do not proceed further in case of NoMMU.
	 */
	if (kgsl_memdesc_use_cpu_map(&entry->memdesc) ||
		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
		return 0;

	pagetable = kgsl_memdesc_is_secured(&entry->memdesc) ?
		device->mmu.securepagetable : process->pagetable;

	return kgsl_mmu_get_gpuaddr(pagetable, &entry->memdesc);
}

/* Commit the entry to the process so it can be accessed by other operations */
static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
{
	if (!entry)
		return;

	spin_lock(&entry->priv->mem_lock);
	idr_replace(&entry->priv->mem_idr, entry, entry->id);
	spin_unlock(&entry->priv->mem_lock);
}

/*
 * Attach the memory object to a process by (possibly) getting a GPU address
 * and (possibly) mapping it
 */
static int kgsl_mem_entry_attach_process(struct kgsl_device *device,
		struct kgsl_process_private *process,
		struct kgsl_mem_entry *entry)
{
	int id, ret;

	ret = kgsl_process_private_get(process);
	if (!ret)
		return -EBADF;

	ret = kgsl_mem_entry_track_gpuaddr(device, process, entry);
	if (ret) {
		kgsl_process_private_put(process);
		return ret;
	}

	idr_preload(GFP_KERNEL);
	spin_lock(&process->mem_lock);
	/* Allocate the ID but don't attach the pointer just yet */
	id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&process->mem_lock);
	idr_preload_end();

	if (id < 0) {
		if (!kgsl_memdesc_use_cpu_map(&entry->memdesc))
			kgsl_mmu_put_gpuaddr(&entry->memdesc);
		kgsl_process_private_put(process);
		return id;
	}

	entry->id = id;
	entry->priv = process;

	/*
	 * Map the memory if a GPU address is already assigned, either through
	 * kgsl_mem_entry_track_gpuaddr() or via some other SVM process
	 */
	if (entry->memdesc.gpuaddr) {
		if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT)
			ret = kgsl_mmu_sparse_dummy_map(
				entry->memdesc.pagetable,
				&entry->memdesc, 0,
				entry->memdesc.size);
		else if (entry->memdesc.gpuaddr)
			ret = kgsl_mmu_map(entry->memdesc.pagetable,
				&entry->memdesc);

		if (ret)
			kgsl_mem_entry_detach_process(entry);
	}

	kgsl_memfree_purge(entry->memdesc.pagetable, entry->memdesc.gpuaddr,
		entry->memdesc.size);

	return ret;
}
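
/*
 * Editorial sketch of the two-step idr pattern used above (not new driver
 * behavior): the ID is reserved with a NULL pointer while mem_lock is held,
 * and only after the entry is fully set up does
 * kgsl_mem_entry_commit_process() publish it with idr_replace(). A lookup
 * racing with creation therefore sees either no entry or a complete one:
 *
 *   id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
 *   ...finish initializing the entry...
 *   idr_replace(&process->mem_idr, entry, id);  (in commit_process)
 */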

/* Detach a memory entry from a process and unmap it from the MMU */
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
	unsigned int type;

	if (entry == NULL)
		return;

	/*
	 * First remove the entry from mem_idr list
	 * so that no one can operate on obsolete values
	 */
	spin_lock(&entry->priv->mem_lock);
	if (entry->id != 0)
		idr_remove(&entry->priv->mem_idr, entry->id);
	entry->id = 0;

	type = kgsl_memdesc_usermem_type(&entry->memdesc);
	entry->priv->stats[type].cur -= entry->memdesc.size;

	spin_unlock(&entry->priv->mem_lock);

	kgsl_mmu_put_gpuaddr(&entry->memdesc);

	kgsl_process_private_put(entry->priv);

	entry->priv = NULL;
}

/**
 * kgsl_context_dump() - dump information about a draw context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks
 */
void kgsl_context_dump(struct kgsl_context *context)
{
	struct kgsl_device *device;

	if (_kgsl_context_get(context) == 0)
		return;

	device = context->device;

	if (kgsl_context_detached(context)) {
		dev_err(device->dev, " context[%d]: context detached\n",
			context->id);
	} else if (device->ftbl->drawctxt_dump != NULL)
		device->ftbl->drawctxt_dump(device, context);

	kgsl_context_put(context);
}
EXPORT_SYMBOL(kgsl_context_dump);

/* Allocate a new context ID */
static int _kgsl_get_context_id(struct kgsl_device *device)
{
	int id;

	idr_preload(GFP_KERNEL);
	write_lock(&device->context_lock);
	/* Allocate the slot but don't put a pointer in it yet */
	id = idr_alloc(&device->context_idr, NULL, 1,
		KGSL_MEMSTORE_MAX, GFP_NOWAIT);
	write_unlock(&device->context_lock);
	idr_preload_end();

	return id;
}

/**
 * kgsl_context_init() - helper to initialize kgsl_context members
 * @dev_priv: the owner of the context
 * @context: the newly created context struct, should be allocated by
 * the device specific drawctxt_create function.
 *
 * This is a helper function for the device specific drawctxt_create
 * function to initialize the common members of its context struct.
 * If this function succeeds, reference counting is active in the context
 * struct and the caller should kgsl_context_put() it on error.
 * If it fails, the caller should just free the context structure
 * it passed in.
 */
int kgsl_context_init(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context)
{
	struct kgsl_device *device = dev_priv->device;
	char name[64];
	int ret = 0, id;
	struct kgsl_process_private *proc_priv = dev_priv->process_priv;

	/*
	 * Read and increment the context count under lock to make sure
	 * no process goes beyond the specified context limit.
	 */
	spin_lock(&proc_priv->ctxt_count_lock);
	if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) {
		KGSL_DRV_ERR_RATELIMIT(device,
			"Per process context limit reached for pid %u",
			pid_nr(dev_priv->process_priv->pid));
		spin_unlock(&proc_priv->ctxt_count_lock);
		return -ENOSPC;
	}

	atomic_inc(&proc_priv->ctxt_count);
	spin_unlock(&proc_priv->ctxt_count_lock);

	id = _kgsl_get_context_id(device);
	if (id == -ENOSPC) {
		/*
		 * Before declaring that there are no contexts left try
		 * flushing the event workqueue just in case there are
		 * detached contexts waiting to finish
		 */

		flush_workqueue(device->events_wq);
		id = _kgsl_get_context_id(device);
	}

	if (id < 0) {
		if (id == -ENOSPC)
			KGSL_DRV_INFO(device,
				"cannot have more than %zu contexts due to memstore limitation\n",
				KGSL_MEMSTORE_MAX);
		atomic_dec(&proc_priv->ctxt_count);
		return id;
	}

	context->id = id;

	kref_init(&context->refcount);
	/*
	 * Get a reference to the process private so it's not destroyed until
	 * the context is destroyed. This will also prevent the pagetable
	 * from being destroyed
	 */
	if (!kgsl_process_private_get(dev_priv->process_priv)) {
		ret = -EBADF;
		goto out;
	}
	context->device = dev_priv->device;
	context->dev_priv = dev_priv;
	context->proc_priv = dev_priv->process_priv;
	context->tid = task_pid_nr(current);

	ret = kgsl_sync_timeline_create(context);
	if (ret) {
		kgsl_process_private_put(dev_priv->process_priv);
		goto out;
	}

	snprintf(name, sizeof(name), "context-%d", id);
	kgsl_add_event_group(&context->events, context, name,
		kgsl_readtimestamp, context);

out:
	if (ret) {
		atomic_dec(&proc_priv->ctxt_count);
		write_lock(&device->context_lock);
		idr_remove(&dev_priv->device->context_idr, id);
		write_unlock(&device->context_lock);
	}

	return ret;
}
EXPORT_SYMBOL(kgsl_context_init);

/**
 * kgsl_context_detach() - Release the "master" context reference
 * @context: The context that will be detached
 *
 * This is called when a context becomes unusable, because userspace
 * has requested for it to be destroyed. The context itself may
 * exist a bit longer until its reference count goes to zero.
 * Other code referencing the context can detect that it has been
 * detached by checking the KGSL_CONTEXT_PRIV_DETACHED bit in
 * context->priv.
 */
void kgsl_context_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;

	if (context == NULL)
		return;

	/*
	 * Mark the context as detached to keep others from using
	 * the context before it gets fully removed, and to make sure
	 * we don't try to detach twice.
	 */
	if (test_and_set_bit(KGSL_CONTEXT_PRIV_DETACHED, &context->priv))
		return;

	device = context->device;

	trace_kgsl_context_detach(device, context);

	context->device->ftbl->drawctxt_detach(context);

	/*
	 * Cancel all pending events after the device-specific context is
	 * detached, to avoid possibly freeing memory while it is still
	 * in use by the GPU.
	 */
	kgsl_cancel_events(device, &context->events);

	/* Remove the event group from the list */
	kgsl_del_event_group(&context->events);

	kgsl_sync_timeline_put(context->ktimeline);

	kgsl_context_put(context);
}

void
kgsl_context_destroy(struct kref *kref)
{
	struct kgsl_context *context = container_of(kref, struct kgsl_context,
						refcount);
	struct kgsl_device *device = context->device;

	trace_kgsl_context_destroy(device, context);

	/*
	 * It's not safe to destroy the context if it's not detached as GPU
	 * may still be executing commands
	 */
	BUG_ON(!kgsl_context_detached(context));

	write_lock(&device->context_lock);
	if (context->id != KGSL_CONTEXT_INVALID) {

		/* Clear the timestamps in the memstore during destroy */
		kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), 0);
		kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), 0);

		/* clear device power constraint */
		if (context->id == device->pwrctrl.constraint.owner_id) {
			trace_kgsl_constraint(device,
				device->pwrctrl.constraint.type,
				device->pwrctrl.active_pwrlevel,
				0);
			device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
		}

		atomic_dec(&context->proc_priv->ctxt_count);
		idr_remove(&device->context_idr, context->id);
		context->id = KGSL_CONTEXT_INVALID;
	}
	write_unlock(&device->context_lock);
	kgsl_sync_timeline_destroy(context);
	kgsl_process_private_put(context->proc_priv);

	device->ftbl->drawctxt_destroy(context);
}

struct kgsl_device *kgsl_get_device(int dev_idx)
{
	int i;
	struct kgsl_device *ret = NULL;

	mutex_lock(&kgsl_driver.devlock);

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
			ret = kgsl_driver.devp[i];
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);
	return ret;
}
EXPORT_SYMBOL(kgsl_get_device);

static struct kgsl_device *kgsl_get_minor(int minor)
{
	struct kgsl_device *ret = NULL;

	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
		return NULL;

	mutex_lock(&kgsl_driver.devlock);
	ret = kgsl_driver.devp[minor];
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}

/**
 * kgsl_check_timestamp() - return true if the specified timestamp is retired
 * @device: Pointer to the KGSL device to check
 * @context: Pointer to the context for the timestamp
 * @timestamp: The timestamp to compare
 */
int kgsl_check_timestamp(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int timestamp)
{
	unsigned int ts_processed;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&ts_processed);

	return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);
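
/*
 * Illustrative note (this assumes the wrap-aware behavior of timestamp_cmp()
 * from the shared kgsl headers, which is not defined in this file):
 * timestamps are 32-bit and are expected to wrap, so the comparison is not a
 * plain '>='. For example, a retired value of 0x00000005 is treated as newer
 * than a queued value of 0xfffffff0, so kgsl_check_timestamp() still reports
 * the older timestamp as retired after the counter rolls over.
 */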

static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
	if (status == 0 && device->state == KGSL_STATE_SUSPEND)
		device->ftbl->dispatcher_halt(device);
	mutex_unlock(&device->mutex);

	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}

static int kgsl_resume_device(struct kgsl_device *device)
{
	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		device->ftbl->dispatcher_unhalt(device);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
	} else if (device->state != KGSL_STATE_INIT) {
		/*
		 * This is an error situation, so wait for the device
		 * to idle and then put the device to SLUMBER state.
		 * This will put the device to the right state when
		 * we resume.
		 */
		if (device->state == KGSL_STATE_ACTIVE)
			device->ftbl->idle(device);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
		KGSL_PWR_ERR(device,
			"resume invoked without a suspend\n");
	}

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return 0;
}

static int kgsl_suspend(struct device *dev)
{
	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);

	return kgsl_suspend_device(device, arg);
}

static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return kgsl_resume_device(device);
}

static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}

static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}

const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);

int kgsl_suspend_driver(struct platform_device *pdev,
		pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);

	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);

int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);

	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);

/**
 * kgsl_destroy_process_private() - Cleanup function to free process private
 * @kref: kref struct of the object being destroyed
 *
 * Free the struct and all other resources attached to it. Since the function
 * can be used before all resources inside the process private have been
 * allocated, each cleanup step checks whether the member being cleaned up
 * was actually allocated; if the value is not NULL, the resource is freed.
 */
static void kgsl_destroy_process_private(struct kref *kref)
{
	struct kgsl_process_private *private = container_of(kref,
			struct kgsl_process_private, refcount);

	put_pid(private->pid);
	idr_destroy(&private->mem_idr);
	idr_destroy(&private->syncsource_idr);

	/* When using global pagetables, do not detach global pagetable */
	if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
		kgsl_mmu_putpagetable(private->pagetable);

	kfree(private);
}

void
kgsl_process_private_put(struct kgsl_process_private *private)
{
	if (private)
		kref_put(&private->refcount, kgsl_destroy_process_private);
}

/**
 * kgsl_process_private_find() - Find the process associated with the specified
 * pid
 * @pid: pid_t of the process to search for
 * Return the process struct for the given ID.
 */
struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
{
	struct kgsl_process_private *p, *private = NULL;

	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(p, &kgsl_driver.process_list, list) {
		if (pid_nr(p->pid) == pid) {
			if (kgsl_process_private_get(p))
				private = p;
			break;
		}
	}
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

static struct kgsl_process_private *kgsl_process_private_new(
		struct kgsl_device *device)
{
	struct kgsl_process_private *private;
	struct pid *cur_pid = get_task_pid(current->group_leader, PIDTYPE_PID);

	/* Search in the process list */
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == cur_pid) {
			if (!kgsl_process_private_get(private)) {
				private = ERR_PTR(-EINVAL);
			}
			/*
			 * We need to hold only one reference to the PID for
			 * each process struct to avoid overflowing the
			 * reference counter which can lead to use-after-free.
			 */
			put_pid(cur_pid);
			return private;
		}
	}

	/* Create a new object */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL) {
		put_pid(cur_pid);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&private->refcount);

	private->pid = cur_pid;
	get_task_comm(private->comm, current->group_leader);

	spin_lock_init(&private->mem_lock);
	spin_lock_init(&private->syncsource_lock);
	spin_lock_init(&private->ctxt_count_lock);

	idr_init(&private->mem_idr);
	idr_init(&private->syncsource_idr);

	/* Allocate a pagetable for the new process object */
	private->pagetable = kgsl_mmu_getpagetable(&device->mmu,
		pid_nr(cur_pid));
	if (IS_ERR(private->pagetable)) {
		int err = PTR_ERR(private->pagetable);

		idr_destroy(&private->mem_idr);
		idr_destroy(&private->syncsource_idr);
		put_pid(private->pid);

		kfree(private);
		private = ERR_PTR(err);
	}

	return private;
}
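
/*
 * Editorial note: kgsl_process_private_new() keeps exactly one struct pid
 * reference per process private. Sketch of the two paths above (no new
 * behavior): if an existing private is found for the caller, the reference
 * taken by get_task_pid() is dropped with put_pid() before returning; only a
 * newly allocated private keeps cur_pid, and that reference is released
 * later in kgsl_destroy_process_private().
 */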

static void process_release_memory(struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry;
	int next = 0;

	while (1) {
		spin_lock(&private->mem_lock);
		entry = idr_get_next(&private->mem_idr, &next);
		if (entry == NULL) {
			spin_unlock(&private->mem_lock);
			break;
		}
		/*
		 * If the free pending flag is not set it means that user space
		 * did not free its reference to this entry. In that case,
		 * free one reference here; the other references are from
		 * within kgsl so they will be freed eventually by kgsl.
		 */
		if (!entry->pending_free) {
			entry->pending_free = 1;
			spin_unlock(&private->mem_lock);
			kgsl_mem_entry_put(entry);
		} else {
			spin_unlock(&private->mem_lock);
		}
		next = next + 1;
	}
}

static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
		struct kgsl_process_private *private)
{
	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->fd_count > 0) {
		mutex_unlock(&kgsl_driver.process_mutex);
		kgsl_process_private_put(private);
		return;
	}

	/*
	 * If this is the last file on the process take down the debug
	 * directories and garbage collect any outstanding resources
	 */

	kgsl_process_uninit_sysfs(private);

	/* Release all syncsource objects from process private */
	kgsl_syncsource_process_release_syncsources(private);

	/* When using global pagetables, do not detach global pagetable */
	if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
		kgsl_mmu_detach_pagetable(private->pagetable);

	/* Remove the process struct from the master list */
	list_del(&private->list);

	/*
	 * Unlock the mutex before releasing the memory and the debugfs
	 * nodes - this prevents deadlocks with the IOMMU and debugfs
	 * locks.
	 */
	mutex_unlock(&kgsl_driver.process_mutex);

	process_release_memory(private);
	debugfs_remove_recursive(private->debug_root);

	kgsl_process_private_put(private);
}

static struct kgsl_process_private *kgsl_process_private_open(
		struct kgsl_device *device)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	private = kgsl_process_private_new(device);

	if (IS_ERR(private))
		goto done;

	/*
	 * If this is a new process create the debug directories and add it to
	 * the process list
	 */

	if (private->fd_count++ == 0) {
		kgsl_process_init_sysfs(device, private);
		kgsl_process_init_debugfs(private);

		list_add(&private->list, &kgsl_driver.process_list);
	}

done:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

static int kgsl_close_device(struct kgsl_device *device)
{
	int result = 0;

	mutex_lock(&device->mutex);
	device->open_count--;
	if (device->open_count == 0) {

		/* Wait for the active count to go to 0 */
		kgsl_active_count_wait(device, 0);

		/* Fail if the wait times out */
		BUG_ON(atomic_read(&device->active_cnt) > 0);

		result = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
	}
	mutex_unlock(&device->mutex);
	return result;
}

static void device_release_contexts(struct kgsl_device_private *dev_priv)
{
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;
	int result = 0;

	while (1) {
		read_lock(&device->context_lock);
		context = idr_get_next(&device->context_idr, &next);

		if (context == NULL) {
			read_unlock(&device->context_lock);
			break;
		} else if (context->dev_priv == dev_priv) {
			/*
			 * Hold a reference to the context in case somebody
			 * tries to put it while we are detaching
			 */
			result = _kgsl_context_get(context);
		}
		read_unlock(&device->context_lock);

		if (result) {
			kgsl_context_detach(context);
			kgsl_context_put(context);
			result = 0;
		}

		next = next + 1;
	}
}

static int kgsl_release(struct inode *inodep, struct file *filep)
{
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_device *device = dev_priv->device;
	int result;

	filep->private_data = NULL;

	/* Release the contexts for the file */
	device_release_contexts(dev_priv);

	/* Close down the process wide resources for the file */
	kgsl_process_private_close(dev_priv, dev_priv->process_priv);

	/* Destroy the device-specific structure */
	device->ftbl->device_private_destroy(dev_priv);

	result = kgsl_close_device(device);
	pm_runtime_put(&device->pdev->dev);

	return result;
}

static int kgsl_open_device(struct kgsl_device *device)
{
	int result = 0;

	mutex_lock(&device->mutex);
	if (device->open_count == 0) {
		/*
		 * active_cnt special case: we are starting up for the first
		 * time, so use this sequence instead of the kgsl_pwrctrl_wake()
		 * which will be called by kgsl_active_count_get().
		 */
		atomic_inc(&device->active_cnt);
		kgsl_sharedmem_set(device, &device->memstore, 0, 0,
			device->memstore.size);

		result = device->ftbl->init(device);
		if (result)
			goto err;

		result = device->ftbl->start(device, 0);
		if (result)
			goto err;
		/*
		 * Make sure the gates are open, so they don't block until
		 * we start suspend or FT.
		 */
		complete_all(&device->hwaccess_gate);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
		kgsl_active_count_put(device);
	}
	device->open_count++;
err:
	if (result) {
		kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
		atomic_dec(&device->active_cnt);
	}

	mutex_unlock(&device->mutex);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	result = pm_runtime_get_sync(&device->pdev->dev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = device->ftbl->device_private_create();
	if (dev_priv == NULL) {
		result = -ENOMEM;
		goto err;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	result = kgsl_open_device(device);
	if (result)
		goto err;

	/*
	 * Get file (per process) private struct. This must be done
	 * after the first start so that the global pagetable mappings
	 * are set up before we create the per-process pagetable.
	 */
	dev_priv->process_priv = kgsl_process_private_open(device);
	if (IS_ERR(dev_priv->process_priv)) {
		result = PTR_ERR(dev_priv->process_priv);
		kgsl_close_device(device);
		goto err;
	}

err:
	if (result) {
		filep->private_data = NULL;
		kfree(dev_priv);
		pm_runtime_put(&device->pdev->dev);
	}
	return result;
}

#define GPUADDR_IN_MEMDESC(_val, _memdesc) \
	(((_val) >= (_memdesc)->gpuaddr) && \
	 ((_val) < ((_memdesc)->gpuaddr + (_memdesc)->size)))

/**
 * kgsl_sharedmem_find() - Find a gpu memory allocation
 *
 * @private: private data for the process to check.
 * @gpuaddr: start address of the region
 *
 * Find a gpu allocation. Caller must kgsl_mem_entry_put()
 * the returned entry when finished using it.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
{
	int ret = 0, id;
	struct kgsl_mem_entry *entry = NULL;

	if (!private)
		return NULL;

	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr))
		return NULL;

	spin_lock(&private->mem_lock);
	idr_for_each_entry(&private->mem_idr, entry, id) {
		if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
			if (!entry->pending_free)
				ret = kgsl_mem_entry_get(entry);
			break;
		}
	}
	spin_unlock(&private->mem_lock);

	return (ret == 0) ? NULL : entry;
}
EXPORT_SYMBOL(kgsl_sharedmem_find);
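
/*
 * Usage sketch (illustrative only, mirroring the rule in the kernel-doc
 * above): a successful lookup returns the entry with an extra reference, so
 * callers pair the find with a put once they are done:
 *
 *   entry = kgsl_sharedmem_find(private, gpuaddr);
 *   if (entry != NULL) {
 *           ...inspect entry->memdesc...
 *           kgsl_mem_entry_put(entry);
 *   }
 */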

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id_flags(struct kgsl_process_private *process,
		unsigned int id, uint64_t flags)
{
	int count = 0;
	struct kgsl_mem_entry *entry;

	spin_lock(&process->mem_lock);
	entry = idr_find(&process->mem_idr, id);
	if (entry)
		if (!entry->pending_free &&
			(flags & entry->memdesc.flags) == flags)
			count = kgsl_mem_entry_get(entry);
	spin_unlock(&process->mem_lock);

	return (count == 0) ? NULL : entry;
}

/**
 * kgsl_sharedmem_find_id() - find a memory entry by id
 * @process: the owning process
 * @id: id to find
 *
 * @returns - the mem_entry or NULL
 *
 * Caller must kgsl_mem_entry_put() the returned entry, when finished using
 * it.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
{
	return kgsl_sharedmem_find_id_flags(process, id, 0);
}

/**
 * kgsl_mem_entry_unset_pend() - Unset the pending free flag of an entry
 * @entry - The memory entry
 */
static inline void kgsl_mem_entry_unset_pend(struct kgsl_mem_entry *entry)
{
	if (entry == NULL)
		return;
	spin_lock(&entry->priv->mem_lock);
	entry->pending_free = 0;
	spin_unlock(&entry->priv->mem_lock);
}

/**
 * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
 * @entry - The memory entry
 *
 * @returns - true if the pending flag was 0, else false
 *
 * This function will set the pending free flag if it was previously unset.
 * Used to prevent a race condition between ioctls calling free and
 * freememontimestamp on the same entry. Whichever thread sets the flag first
 * will do the free.
 */
static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry)
{
	bool ret = false;

	if (entry == NULL)
		return false;

	spin_lock(&entry->priv->mem_lock);
	if (!entry->pending_free) {
		entry->pending_free = 1;
		ret = true;
	}
	spin_unlock(&entry->priv->mem_lock);
	return ret;
}
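
/*
 * Illustrative interleaving (editorial): if one thread frees an entry with
 * IOCTL_KGSL_SHAREDMEM_FREE while another queues it for
 * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP, both paths call
 * kgsl_mem_entry_set_pend(). Only the first caller sees the flag go from 0
 * to 1 and proceeds to drop the reference; the second gets false and backs
 * off with -EBUSY, so the entry is freed exactly once.
 */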

/* Call all ioctl sub-functions with the driver locked */
long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;

		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_context_get_owner(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
			sizeof(unsigned int)))
			result = -EFAULT;
		else {
			/* Clear reset status once it's been queried */
			context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		}

		kgsl_context_put(context);
		break;
	}
	case KGSL_PROP_SECURE_BUFFER_ALIGNMENT:
	{
		unsigned int align;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/*
		 * XPUv2 imposes a 1MB alignment constraint on secure memory;
		 * the hypervisor, on the other hand, has no such constraint.
		 * The driver should satisfy the stricter requirement when
		 * allocating secure memory.
		 */
		align = MMU_FEATURE(&dev_priv->device->mmu,
			KGSL_MMU_HYP_SECURE_ALLOC) ? PAGE_SIZE : SZ_1M;

		if (copy_to_user(param->value, &align, sizeof(align)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_SECURE_CTXT_SUPPORT:
	{
		unsigned int secure_ctxt;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}

		secure_ctxt = dev_priv->device->mmu.secured ? 1 : 0;

		if (copy_to_user(param->value, &secure_ctxt,
			sizeof(secure_ctxt)))
			result = -EFAULT;

		break;
	}
	default:
		if (is_compat_task())
			result = dev_priv->device->ftbl->getproperty_compat(
				dev_priv->device, param->type,
				param->value, param->sizebytes);
		else
			result = dev_priv->device->ftbl->getproperty(
				dev_priv->device, param->type,
				param->value, param->sizebytes);
	}

	return result;
}
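
/*
 * Userspace usage sketch (illustrative only; it assumes the standard
 * IOCTL_KGSL_DEVICE_GETPROPERTY wrapper and struct layout from msm_kgsl.h
 * rather than anything defined in this file):
 *
 *   struct kgsl_version ver = {0};
 *   struct kgsl_device_getproperty prop = {
 *           .type = KGSL_PROP_VERSION,
 *           .value = &ver,
 *           .sizebytes = sizeof(ver),
 *   };
 *   if (ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop) == 0)
 *           printf("driver %u.%u device %u.%u\n", ver.drv_major,
 *                  ver.drv_minor, ver.dev_major, ver.dev_minor);
 */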

long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	/* The getproperty struct is reused for setproperty too */
	struct kgsl_device_getproperty *param = data;

	/* Reroute to compat version if coming from compat_ioctl */
	if (is_compat_task())
		result = dev_priv->device->ftbl->setproperty_compat(
			dev_priv, param->type, param->value,
			param->sizebytes);
	else if (dev_priv->device->ftbl->setproperty)
		result = dev_priv->device->ftbl->setproperty(
			dev_priv, param->type, param->value,
			param->sizebytes);

	return result;
}

long kgsl_ioctl_device_waittimestamp_ctxtid(
		struct kgsl_device_private *dev_priv, unsigned int cmd,
		void *data)
{
	struct kgsl_device_waittimestamp_ctxtid *param = data;
	struct kgsl_device *device = dev_priv->device;
	long result = -EINVAL;
	unsigned int temp_cur_ts = 0;
	struct kgsl_context *context;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return result;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&temp_cur_ts);

	trace_kgsl_waittimestamp_entry(device, context->id, temp_cur_ts,
		param->timestamp, param->timeout);

	result = device->ftbl->waittimestamp(device, context, param->timestamp,
		param->timeout);

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&temp_cur_ts);
	trace_kgsl_waittimestamp_exit(device, temp_cur_ts, result);

	kgsl_context_put(context);

	return result;
}

static inline bool _check_context_is_sparse(struct kgsl_context *context,
		uint64_t flags)
{
	if ((context->flags & KGSL_CONTEXT_SPARSE) ||
		(flags & KGSL_DRAWOBJ_SPARSE))
		return true;

	return false;
}

long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj;
	struct kgsl_drawobj_cmd *cmdobj;
	long result = -EINVAL;

	/* The legacy functions don't support synchronization commands */
	if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
		return -EINVAL;

	/* Sanity check the number of IBs */
	if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
		(param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
		return -EINVAL;

	/* Get the context */
	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
	if (context == NULL)
		return -EINVAL;

	if (_check_context_is_sparse(context, param->flags)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
		CMDOBJ_TYPE);
	if (IS_ERR(cmdobj)) {
		kgsl_context_put(context);
		return PTR_ERR(cmdobj);
	}

	drawobj = DRAWOBJ(cmdobj);

	if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
		result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
			(void __user *) param->ibdesc_addr,
			param->numibs);
	else {
		struct kgsl_ibdesc ibdesc;
		/* Ultra legacy path */

		ibdesc.gpuaddr = param->ibdesc_addr;
		ibdesc.sizedwords = param->numibs;
		ibdesc.ctrl = 0;

		result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
	}

	if (result == 0)
		result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
			&drawobj, 1, &param->timestamp);

	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		kgsl_drawobj_destroy(drawobj);

	kgsl_context_put(context);
	return result;
}

/* Returns 0 on failure. Returns command type(s) on success */
static unsigned int _process_command_input(struct kgsl_device *device,
		unsigned int flags, unsigned int numcmds,
		unsigned int numobjs, unsigned int numsyncs)
{
	if (numcmds > KGSL_MAX_NUMIBS ||
		numobjs > KGSL_MAX_NUMIBS ||
		numsyncs > KGSL_MAX_SYNCPOINTS)
		return 0;

	/*
	 * The SYNC bit is supposed to identify a dummy sync object
	 * so warn the user if they specified any IBs with it.
	 * A MARKER command can either have IBs or not but if the
	 * command has 0 IBs it is automatically assumed to be a marker.
	 */

	/* If they specify the flag, go with what they say */
	if (flags & KGSL_DRAWOBJ_MARKER)
		return MARKEROBJ_TYPE;
	else if (flags & KGSL_DRAWOBJ_SYNC)
		return SYNCOBJ_TYPE;

	/* If not, deduce what they meant */
	if (numsyncs && numcmds)
		return SYNCOBJ_TYPE | CMDOBJ_TYPE;
	else if (numsyncs)
		return SYNCOBJ_TYPE;
	else if (numcmds)
		return CMDOBJ_TYPE;
	else if (numcmds == 0)
		return MARKEROBJ_TYPE;

	return 0;
}
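
/*
 * Editorial examples of the deduction above (with neither MARKER nor SYNC
 * set in the flags):
 *   numcmds=2, numsyncs=0  ->  CMDOBJ_TYPE
 *   numcmds=2, numsyncs=1  ->  SYNCOBJ_TYPE | CMDOBJ_TYPE
 *   numcmds=0, numsyncs=3  ->  SYNCOBJ_TYPE
 *   numcmds=0, numsyncs=0  ->  MARKEROBJ_TYPE (an empty submission is
 *                              treated as a marker, per the comment above)
 */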

long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_submit_commands *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj[2];
	unsigned int type;
	long result;
	unsigned int i = 0;

	type = _process_command_input(device, param->flags, param->numcmds, 0,
		param->numsyncs);
	if (!type)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	if (_check_context_is_sparse(context, param->flags)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	if (type & SYNCOBJ_TYPE) {
		struct kgsl_drawobj_sync *syncobj =
			kgsl_drawobj_sync_create(device, context);
		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);

		result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
			param->synclist, param->numsyncs);
		if (result)
			goto done;
	}

	if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
		struct kgsl_drawobj_cmd *cmdobj =
			kgsl_drawobj_cmd_create(device,
				context, param->flags, type);
		if (IS_ERR(cmdobj)) {
			result = PTR_ERR(cmdobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(cmdobj);

		result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
			param->cmdlist, param->numcmds);
		if (result)
			goto done;

		/* If no profiling buffer was specified, clear the flag */
		if (cmdobj->profiling_buf_entry == NULL)
			DRAWOBJ(cmdobj)->flags &=
				~(unsigned long)KGSL_DRAWOBJ_PROFILING;
	}

	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
		i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);

	kgsl_context_put(context);
	return result;
}

long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpu_command *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj[2];
	unsigned int type;
	long result;
	unsigned int i = 0;

	type = _process_command_input(device, param->flags, param->numcmds,
		param->numobjs, param->numsyncs);
	if (!type)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	if (_check_context_is_sparse(context, param->flags)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	if (type & SYNCOBJ_TYPE) {
		struct kgsl_drawobj_sync *syncobj =
			kgsl_drawobj_sync_create(device, context);

		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);

		result = kgsl_drawobj_sync_add_synclist(device, syncobj,
			to_user_ptr(param->synclist),
			param->syncsize, param->numsyncs);
		if (result)
			goto done;
	}

	if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
		struct kgsl_drawobj_cmd *cmdobj =
			kgsl_drawobj_cmd_create(device,
				context, param->flags, type);

		if (IS_ERR(cmdobj)) {
			result = PTR_ERR(cmdobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(cmdobj);

		result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
			to_user_ptr(param->cmdlist),
			param->cmdsize, param->numcmds);
		if (result)
			goto done;

		result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
			to_user_ptr(param->objlist),
			param->objsize, param->numobjs);
		if (result)
			goto done;

		/* If no profiling buffer was specified, clear the flag */
		if (cmdobj->profiling_buf_entry == NULL)
			DRAWOBJ(cmdobj)->flags &=
				~(unsigned long)KGSL_DRAWOBJ_PROFILING;
	}

	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
		i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);

	kgsl_context_put(context);
	return result;
}

long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
		*dev_priv, unsigned int cmd,
		void *data)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	long result = -EINVAL;

	mutex_lock(&device->mutex);
	context = kgsl_context_get_owner(dev_priv, param->context_id);

	if (context) {
		result = kgsl_readtimestamp(device, context,
			param->type, &param->timestamp);

		trace_kgsl_readtimestamp(device, context->id,
			param->type, param->timestamp);
	}

	kgsl_context_put(context);
	mutex_unlock(&device->mutex);
	return result;
}

long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;
	struct kgsl_device *device = dev_priv->device;

	context = device->ftbl->drawctxt_create(dev_priv, &param->flags);
	if (IS_ERR(context)) {
		result = PTR_ERR(context);
		goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);

	/* Commit the pointer to the context in context_idr */
	write_lock(&device->context_lock);
	idr_replace(&device->context_idr, context, context->id);
	param->drawctxt_id = context->id;
	write_unlock(&device->context_lock);

done:
	return result;
}

long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
	if (context == NULL)
		return -EINVAL;

	kgsl_context_detach(context);
	kgsl_context_put(context);

	return 0;
}

long gpumem_free_entry(struct kgsl_mem_entry *entry)
{
	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	trace_kgsl_mem_free(entry);
	kgsl_memfree_add(pid_nr(entry->priv->pid),
		entry->memdesc.pagetable ?
		entry->memdesc.pagetable->name : 0,
		entry->memdesc.gpuaddr, entry->memdesc.size,
		entry->memdesc.flags);

	kgsl_mem_entry_put(entry);

	return 0;
}

static void gpumem_free_func(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int ret)
{
	struct kgsl_context *context = group->context;
	struct kgsl_mem_entry *entry = priv;
	unsigned int timestamp;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &timestamp);

	/* Free the memory for all event types */
	trace_kgsl_mem_timestamp_free(device, entry, KGSL_CONTEXT_ID(context),
		timestamp, 0);
	kgsl_memfree_add(pid_nr(entry->priv->pid),
		entry->memdesc.pagetable ?
		entry->memdesc.pagetable->name : 0,
		entry->memdesc.gpuaddr, entry->memdesc.size,
		entry->memdesc.flags);

	kgsl_mem_entry_put(entry);
}

static long gpumem_free_entry_on_timestamp(struct kgsl_device *device,
		struct kgsl_mem_entry *entry,
		struct kgsl_context *context, unsigned int timestamp)
{
	int ret;
	unsigned int temp;

	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &temp);
	trace_kgsl_mem_timestamp_queue(device, entry, context->id, temp,
		timestamp);
	ret = kgsl_add_event(device, &context->events,
		timestamp, gpumem_free_func, entry);

	if (ret)
		kgsl_mem_entry_unset_pend(entry);

	return ret;
}
1925
1926long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
1927 unsigned int cmd, void *data)
1928{
1929 struct kgsl_sharedmem_free *param = data;
1930 struct kgsl_process_private *private = dev_priv->process_priv;
1931 struct kgsl_mem_entry *entry;
1932 long ret;
1933
1934 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
1935 if (entry == NULL)
1936 return -EINVAL;
1937
1938 ret = gpumem_free_entry(entry);
Hareesh Gundu615439d2017-06-16 17:06:57 +05301939 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001940
1941 return ret;
1942}
1943
1944long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
1945 unsigned int cmd, void *data)
1946{
1947 struct kgsl_gpumem_free_id *param = data;
1948 struct kgsl_process_private *private = dev_priv->process_priv;
1949 struct kgsl_mem_entry *entry;
1950 long ret;
1951
1952 entry = kgsl_sharedmem_find_id(private, param->id);
1953 if (entry == NULL)
1954 return -EINVAL;
1955
1956 ret = gpumem_free_entry(entry);
Hareesh Gundu615439d2017-06-16 17:06:57 +05301957 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001958
1959 return ret;
1960}
1961
1962static long gpuobj_free_on_timestamp(struct kgsl_device_private *dev_priv,
1963 struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
1964{
1965 struct kgsl_gpu_event_timestamp event;
1966 struct kgsl_context *context;
1967 long ret;
1968
1969 memset(&event, 0, sizeof(event));
1970
1971 ret = _copy_from_user(&event, to_user_ptr(param->priv),
1972 sizeof(event), param->len);
1973 if (ret)
1974 return ret;
1975
1976 if (event.context_id == 0)
1977 return -EINVAL;
1978
1979 context = kgsl_context_get_owner(dev_priv, event.context_id);
1980 if (context == NULL)
1981 return -EINVAL;
1982
1983 ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry, context,
1984 event.timestamp);
1985
1986 kgsl_context_put(context);
1987 return ret;
1988}
1989
Lynus Vaz27da44d2017-07-26 13:50:10 +05301990static bool gpuobj_free_fence_func(void *priv)
Shrenuj Bansala419c792016-10-20 14:05:11 -07001991{
1992 struct kgsl_mem_entry *entry = priv;
1993
Lynus Vaz3fe67582017-11-08 15:22:32 +05301994 trace_kgsl_mem_free(entry);
Archana Sriramd66ae7b2020-10-18 23:34:04 +05301995 kgsl_memfree_add(pid_nr(entry->priv->pid),
Lynus Vaz3fe67582017-11-08 15:22:32 +05301996 entry->memdesc.pagetable ?
1997 entry->memdesc.pagetable->name : 0,
1998 entry->memdesc.gpuaddr, entry->memdesc.size,
1999 entry->memdesc.flags);
2000
Hareesh Gundu615439d2017-06-16 17:06:57 +05302001 INIT_WORK(&entry->work, _deferred_put);
2002 queue_work(kgsl_driver.mem_workqueue, &entry->work);
Lynus Vaz27da44d2017-07-26 13:50:10 +05302003 return true;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002004}
2005
2006static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
2007 struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
2008{
Lynus Vazc031a9b2017-01-25 13:00:13 +05302009 struct kgsl_sync_fence_cb *handle;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002010 struct kgsl_gpu_event_fence event;
2011 long ret;
2012
2013 if (!kgsl_mem_entry_set_pend(entry))
2014 return -EBUSY;
2015
2016 memset(&event, 0, sizeof(event));
2017
2018 ret = _copy_from_user(&event, to_user_ptr(param->priv),
2019 sizeof(event), param->len);
2020 if (ret) {
2021 kgsl_mem_entry_unset_pend(entry);
2022 return ret;
2023 }
2024
2025 if (event.fd < 0) {
2026 kgsl_mem_entry_unset_pend(entry);
2027 return -EINVAL;
2028 }
2029
2030 handle = kgsl_sync_fence_async_wait(event.fd,
Puranam V G Tejaswi0ebbdcd2018-12-13 15:47:00 +05302031 gpuobj_free_fence_func, entry, NULL);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002032
Shrenuj Bansala419c792016-10-20 14:05:11 -07002033 if (IS_ERR(handle)) {
2034 kgsl_mem_entry_unset_pend(entry);
2035 return PTR_ERR(handle);
2036 }
2037
Lynus Vaz3fe67582017-11-08 15:22:32 +05302038 /* if handle is NULL the fence has already signaled */
2039 if (handle == NULL)
2040 gpuobj_free_fence_func(entry);
2041
Shrenuj Bansala419c792016-10-20 14:05:11 -07002042 return 0;
2043}
2044
2045long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
2046 unsigned int cmd, void *data)
2047{
2048 struct kgsl_gpuobj_free *param = data;
2049 struct kgsl_process_private *private = dev_priv->process_priv;
2050 struct kgsl_mem_entry *entry;
2051 long ret;
2052
2053 entry = kgsl_sharedmem_find_id(private, param->id);
2054 if (entry == NULL)
2055 return -EINVAL;
2056
2057 /* If no event is specified then free immediately */
2058 if (!(param->flags & KGSL_GPUOBJ_FREE_ON_EVENT))
2059 ret = gpumem_free_entry(entry);
2060 else if (param->type == KGSL_GPU_EVENT_TIMESTAMP)
2061 ret = gpuobj_free_on_timestamp(dev_priv, entry, param);
2062 else if (param->type == KGSL_GPU_EVENT_FENCE)
2063 ret = gpuobj_free_on_fence(dev_priv, entry, param);
2064 else
2065 ret = -EINVAL;
2066
Hareesh Gundu615439d2017-06-16 17:06:57 +05302067 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002068 return ret;
2069}
2070
2071long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
2072 struct kgsl_device_private *dev_priv,
2073 unsigned int cmd, void *data)
2074{
2075 struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
2076 struct kgsl_context *context = NULL;
2077 struct kgsl_mem_entry *entry;
2078 long ret = -EINVAL;
2079
2080 if (param->type != KGSL_TIMESTAMP_RETIRED)
2081 return -EINVAL;
2082
2083 context = kgsl_context_get_owner(dev_priv, param->context_id);
2084 if (context == NULL)
2085 return -EINVAL;
2086
2087 entry = kgsl_sharedmem_find(dev_priv->process_priv,
2088 (uint64_t) param->gpuaddr);
2089 if (entry == NULL) {
2090 kgsl_context_put(context);
2091 return -EINVAL;
2092 }
2093
2094 ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry,
2095 context, param->timestamp);
2096
2097 kgsl_mem_entry_put(entry);
2098 kgsl_context_put(context);
2099
2100 return ret;
2101}
2102
Shrenuj Bansala419c792016-10-20 14:05:11 -07002103static int check_vma_flags(struct vm_area_struct *vma,
2104 unsigned int flags)
2105{
2106 unsigned long flags_requested = (VM_READ | VM_WRITE);
2107
2108 if (flags & KGSL_MEMFLAGS_GPUREADONLY)
Lynus Vazeb7af682017-04-17 18:36:01 +05302109 flags_requested &= ~(unsigned long)VM_WRITE;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002110
2111 if ((vma->vm_flags & flags_requested) == flags_requested)
2112 return 0;
2113
2114 return -EFAULT;
2115}
2116
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002117static bool check_vma(unsigned long hostptr, u64 size)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002118{
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002119 struct vm_area_struct *vma;
2120 unsigned long cur = hostptr;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002121
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002122 while (cur < (hostptr + size)) {
2123 vma = find_vma(current->mm, cur);
2124 if (!vma)
2125 return false;
2126
2127 /* Don't remap memory that we already own */
2128 if (vma->vm_file && vma->vm_file->f_op == &kgsl_fops)
2129 return false;
2130
2131 cur = vma->vm_end;
2132 }
2133
2134 return true;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002135}
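
/*
 * Illustrative note (editor's addition, hypothetical addresses): for a
 * call such as check_vma(0x7f0000000000, 3 * PAGE_SIZE), the loop above
 * requires every byte of [hostptr, hostptr + size) to fall inside some
 * VMA of the current process, stepping cur to each vma->vm_end in turn.
 * A hole between VMAs, or a VMA that is itself a KGSL mapping (vm_file
 * backed by kgsl_fops), fails the walk and memdesc_sg_virt() below turns
 * that into -EFAULT before any pages are pinned.
 */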
2136
Jordan Crouse6bce65c2020-12-28 16:06:42 +05302137static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long useraddr)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002138{
2139 int ret = 0;
2140 long npages = 0, i;
2141 size_t sglen = (size_t) (memdesc->size / PAGE_SIZE);
2142 struct page **pages = NULL;
2143 int write = ((memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 0 :
2144 FOLL_WRITE);
2145
2146 if (sglen == 0 || sglen >= LONG_MAX)
2147 return -EINVAL;
2148
2149 pages = kgsl_malloc(sglen * sizeof(struct page *));
2150 if (pages == NULL)
2151 return -ENOMEM;
2152
2153 memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
2154 if (memdesc->sgt == NULL) {
2155 ret = -ENOMEM;
2156 goto out;
2157 }
2158
2159 down_read(&current->mm->mmap_sem);
Jordan Crouse6bce65c2020-12-28 16:06:42 +05302160 if (!check_vma(useraddr, memdesc->size)) {
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002161 up_read(&current->mm->mmap_sem);
2162 ret = -EFAULT;
2163 goto out;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002164 }
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002165
Jordan Crouse6bce65c2020-12-28 16:06:42 +05302166 npages = get_user_pages(useraddr,
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002167 sglen, write, pages, NULL);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002168 up_read(&current->mm->mmap_sem);
2169
Jordan Crouse9ea6cad2020-09-24 09:05:53 -06002170 ret = (npages < 0) ? (int)npages : 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002171 if (ret)
2172 goto out;
2173
2174 if ((unsigned long) npages != sglen) {
2175 ret = -EINVAL;
2176 goto out;
2177 }
2178
2179 ret = sg_alloc_table_from_pages(memdesc->sgt, pages, npages,
2180 0, memdesc->size, GFP_KERNEL);
2181out:
2182 if (ret) {
2183 for (i = 0; i < npages; i++)
2184 put_page(pages[i]);
2185
2186 kfree(memdesc->sgt);
2187 memdesc->sgt = NULL;
2188 }
2189 kgsl_free(pages);
2190 return ret;
2191}
2192
2193static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
2194 struct kgsl_mem_entry *entry, unsigned long hostptr,
2195 size_t offset, size_t size)
2196{
2197 /* Map an anonymous memory chunk */
2198
Neeraja Pb8350672021-01-07 20:48:09 +05302199 int ret;
2200
Shrenuj Bansala419c792016-10-20 14:05:11 -07002201 if (size == 0 || offset != 0 ||
2202 !IS_ALIGNED(size, PAGE_SIZE))
2203 return -EINVAL;
2204
2205 entry->memdesc.pagetable = pagetable;
2206 entry->memdesc.size = (uint64_t) size;
Lynus Vazeb7af682017-04-17 18:36:01 +05302207 entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002208
2209 if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07002210
2211 /* Register the address in the database */
2212 ret = kgsl_mmu_set_svm_region(pagetable,
Jordan Crouse6bce65c2020-12-28 16:06:42 +05302213 (uint64_t) hostptr, (uint64_t) size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002214
2215 if (ret)
2216 return ret;
2217
Jordan Crouse6bce65c2020-12-28 16:06:42 +05302218 entry->memdesc.gpuaddr = (uint64_t) hostptr;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002219 }
2220
Neeraja Pb8350672021-01-07 20:48:09 +05302221 ret = memdesc_sg_virt(&entry->memdesc, hostptr);
2222
2223 if (ret && kgsl_memdesc_use_cpu_map(&entry->memdesc))
2224 kgsl_mmu_put_gpuaddr(&entry->memdesc);
2225
2226 return ret;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002227}
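
/*
 * Editor's sketch (hypothetical values, not from the original source):
 * when the entry carries KGSL_MEMFLAGS_USE_CPU_MAP, the GPU virtual
 * address mirrors the CPU address, e.g. importing hostptr = 0x7f12340000
 * with size = SZ_64K reserves exactly that range through
 * kgsl_mmu_set_svm_region() and sets entry->memdesc.gpuaddr to
 * 0x7f12340000.  Without the flag, gpuaddr is left at zero here and is
 * presumably assigned later when the entry is attached to the process
 * pagetable.
 */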
2228
2229#ifdef CONFIG_DMA_SHARED_BUFFER
2230static int match_file(const void *p, struct file *file, unsigned int fd)
2231{
2232 /*
2233 * We must return fd + 1 because iterate_fd stops searching on
2234 * non-zero return, but 0 is a valid fd.
2235 */
2236 return (p == file) ? (fd + 1) : 0;
2237}
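
/*
 * Usage sketch (editor's addition): callers undo the +1 bias, as the
 * dma-buf lookup in kgsl_setup_dmabuf_useraddr() below does:
 *
 *	fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
 *	if (fd != 0)
 *		dmabuf = dma_buf_get(fd - 1);
 *
 * so a return of 0 means "no matching fd" while fd 0 itself is still
 * representable (as 1).
 */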
2238
2239static void _setup_cache_mode(struct kgsl_mem_entry *entry,
2240 struct vm_area_struct *vma)
2241{
Lynus Vazeb7af682017-04-17 18:36:01 +05302242 uint64_t mode;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002243 pgprot_t pgprot = vma->vm_page_prot;
2244
2245 if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot)))
2246 mode = KGSL_CACHEMODE_UNCACHED;
2247 else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(pgprot)))
2248 mode = KGSL_CACHEMODE_WRITECOMBINE;
2249 else
2250 mode = KGSL_CACHEMODE_WRITEBACK;
2251
2252 entry->memdesc.flags |= (mode << KGSL_CACHEMODE_SHIFT);
2253}
2254
2255static int kgsl_setup_dma_buf(struct kgsl_device *device,
2256 struct kgsl_pagetable *pagetable,
2257 struct kgsl_mem_entry *entry,
2258 struct dma_buf *dmabuf);
2259
2260static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
2261 struct kgsl_pagetable *pagetable,
2262 struct kgsl_mem_entry *entry, unsigned long hostptr)
2263{
2264 struct vm_area_struct *vma;
2265 struct dma_buf *dmabuf = NULL;
2266 int ret;
2267
2268 /*
2269 * Find the VMA containing this pointer and figure out if it
2270 * is a dma-buf.
2271 */
2272 down_read(&current->mm->mmap_sem);
2273 vma = find_vma(current->mm, hostptr);
2274
2275 if (vma && vma->vm_file) {
2276 int fd;
2277
2278 ret = check_vma_flags(vma, entry->memdesc.flags);
2279 if (ret) {
2280 up_read(&current->mm->mmap_sem);
2281 return ret;
2282 }
2283
2284 /*
2285 * Check to see that this isn't our own memory that we have
2286 * already mapped
2287 */
2288 if (vma->vm_file->f_op == &kgsl_fops) {
2289 up_read(&current->mm->mmap_sem);
2290 return -EFAULT;
2291 }
2292
2293		/* Look for the fd that matches this vma's file */
2294 fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
2295 if (fd != 0)
2296 dmabuf = dma_buf_get(fd - 1);
2297 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07002298
Sunil Khatri51a79372017-07-06 15:09:35 +05302299 if (IS_ERR_OR_NULL(dmabuf)) {
2300 up_read(&current->mm->mmap_sem);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002301 return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
Sunil Khatri51a79372017-07-06 15:09:35 +05302302 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07002303
2304 ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
2305 if (ret) {
2306 dma_buf_put(dmabuf);
Sunil Khatri51a79372017-07-06 15:09:35 +05302307 up_read(&current->mm->mmap_sem);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002308 return ret;
2309 }
2310
Jordan Crouse6bce65c2020-12-28 16:06:42 +05302311 /* Setup the cache mode for cache operations */
Shrenuj Bansala419c792016-10-20 14:05:11 -07002312 _setup_cache_mode(entry, vma);
Sunil Khatri51a79372017-07-06 15:09:35 +05302313 up_read(&current->mm->mmap_sem);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002314 return 0;
2315}
2316#else
2317static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
2318 struct kgsl_pagetable *pagetable,
2319 struct kgsl_mem_entry *entry, unsigned long hostptr)
2320{
2321 return -ENODEV;
2322}
2323#endif
2324
2325static int kgsl_setup_useraddr(struct kgsl_device *device,
2326 struct kgsl_pagetable *pagetable,
2327 struct kgsl_mem_entry *entry,
2328 unsigned long hostptr, size_t offset, size_t size)
2329{
2330 int ret;
2331
2332 if (hostptr == 0 || !IS_ALIGNED(hostptr, PAGE_SIZE))
2333 return -EINVAL;
2334
2335 /* Try to set up a dmabuf - if it returns -ENODEV assume anonymous */
2336 ret = kgsl_setup_dmabuf_useraddr(device, pagetable, entry, hostptr);
2337 if (ret != -ENODEV)
2338 return ret;
2339
2340	/* Okay - let's go legacy */
2341 return kgsl_setup_anon_useraddr(pagetable, entry,
2342 hostptr, offset, size);
2343}
2344
2345static long _gpuobj_map_useraddr(struct kgsl_device *device,
2346 struct kgsl_pagetable *pagetable,
2347 struct kgsl_mem_entry *entry,
2348 struct kgsl_gpuobj_import *param)
2349{
Archana Obannagarice60fec2017-09-08 20:35:28 +05302350 struct kgsl_gpuobj_import_useraddr useraddr = {0};
Shrenuj Bansala419c792016-10-20 14:05:11 -07002351 int ret;
2352
2353 param->flags &= KGSL_MEMFLAGS_GPUREADONLY
2354 | KGSL_CACHEMODE_MASK
2355 | KGSL_MEMTYPE_MASK
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07002356 | KGSL_MEMFLAGS_FORCE_32BIT
2357 | KGSL_MEMFLAGS_IOCOHERENT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002358
2359 /* Specifying SECURE is an explicit error */
2360 if (param->flags & KGSL_MEMFLAGS_SECURE)
2361 return -ENOTSUPP;
2362
2363 ret = _copy_from_user(&useraddr,
2364 to_user_ptr(param->priv), sizeof(useraddr),
2365 param->priv_len);
2366 if (ret)
2367 return ret;
2368
2369	/* Verify that the virtaddr is within bounds */
2370 if (useraddr.virtaddr > ULONG_MAX)
2371 return -EINVAL;
2372
2373 return kgsl_setup_useraddr(device, pagetable, entry,
2374 (unsigned long) useraddr.virtaddr, 0, param->priv_len);
2375}
2376
2377#ifdef CONFIG_DMA_SHARED_BUFFER
2378static long _gpuobj_map_dma_buf(struct kgsl_device *device,
2379 struct kgsl_pagetable *pagetable,
2380 struct kgsl_mem_entry *entry,
2381 struct kgsl_gpuobj_import *param,
2382 int *fd)
2383{
2384 struct kgsl_gpuobj_import_dma_buf buf;
2385 struct dma_buf *dmabuf;
2386 int ret;
2387
2388 /*
2389	 * If content protection is not enabled and a secure buffer
2390	 * is requested to be mapped, return an error.
2391 */
2392 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
2393 if (!kgsl_mmu_is_secured(&device->mmu)) {
2394 dev_WARN_ONCE(device->dev, 1,
2395 "Secure buffer not supported");
2396 return -ENOTSUPP;
2397 }
2398
2399 entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
2400 }
2401
2402 ret = _copy_from_user(&buf, to_user_ptr(param->priv),
2403 sizeof(buf), param->priv_len);
2404 if (ret)
2405 return ret;
2406
2407 if (buf.fd < 0)
2408 return -EINVAL;
2409
2410 *fd = buf.fd;
2411 dmabuf = dma_buf_get(buf.fd);
2412
2413 if (IS_ERR_OR_NULL(dmabuf))
2414 return (dmabuf == NULL) ? -EINVAL : PTR_ERR(dmabuf);
2415
2416 ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
2417 if (ret)
2418 dma_buf_put(dmabuf);
2419
2420 return ret;
2421}
2422#else
2423static long _gpuobj_map_dma_buf(struct kgsl_device *device,
2424 struct kgsl_pagetable *pagetable,
2425 struct kgsl_mem_entry *entry,
2426 struct kgsl_gpuobj_import *param,
2427 int *fd)
2428{
2429 return -EINVAL;
2430}
2431#endif
2432
2433long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
2434 unsigned int cmd, void *data)
2435{
2436 struct kgsl_process_private *private = dev_priv->process_priv;
2437 struct kgsl_gpuobj_import *param = data;
2438 struct kgsl_mem_entry *entry;
2439 int ret, fd = -1;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002440
2441 entry = kgsl_mem_entry_create();
2442 if (entry == NULL)
2443 return -ENOMEM;
2444
2445 param->flags &= KGSL_MEMFLAGS_GPUREADONLY
2446 | KGSL_MEMTYPE_MASK
2447 | KGSL_MEMALIGN_MASK
2448 | KGSL_MEMFLAGS_USE_CPU_MAP
2449 | KGSL_MEMFLAGS_SECURE
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07002450 | KGSL_MEMFLAGS_FORCE_32BIT
2451 | KGSL_MEMFLAGS_IOCOHERENT;
2452
Deepak Kumarcf056d12018-04-17 15:59:42 +05302453 if (kgsl_is_compat_task())
2454 param->flags |= KGSL_MEMFLAGS_FORCE_32BIT;
2455
Lynus Vaz90d98b52018-04-09 14:45:36 +05302456 kgsl_memdesc_init(dev_priv->device, &entry->memdesc, param->flags);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002457 if (param->type == KGSL_USER_MEM_TYPE_ADDR)
2458 ret = _gpuobj_map_useraddr(dev_priv->device, private->pagetable,
2459 entry, param);
2460 else if (param->type == KGSL_USER_MEM_TYPE_DMABUF)
2461 ret = _gpuobj_map_dma_buf(dev_priv->device, private->pagetable,
2462 entry, param, &fd);
2463 else
2464 ret = -ENOTSUPP;
2465
2466 if (ret)
2467 goto out;
2468
2469 if (entry->memdesc.size >= SZ_1M)
2470 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
2471 else if (entry->memdesc.size >= SZ_64K)
2472 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
2473
2474 param->flags = entry->memdesc.flags;
2475
2476 ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
2477 if (ret)
2478 goto unmap;
2479
2480 param->id = entry->id;
2481
2482 KGSL_STATS_ADD(entry->memdesc.size, &kgsl_driver.stats.mapped,
2483 &kgsl_driver.stats.mapped_max);
2484
2485 kgsl_process_add_stats(private,
2486 kgsl_memdesc_usermem_type(&entry->memdesc),
2487 entry->memdesc.size);
2488
2489 trace_kgsl_mem_map(entry, fd);
2490
2491 kgsl_mem_entry_commit_process(entry);
Tarun Karra24d3fe12017-04-05 15:23:03 -07002492
2493 /* Put the extra ref from kgsl_mem_entry_create() */
2494 kgsl_mem_entry_put(entry);
2495
Shrenuj Bansala419c792016-10-20 14:05:11 -07002496 return 0;
2497
2498unmap:
Kamal Agrawal8f0fb822020-08-19 10:25:15 +05302499 if (kgsl_memdesc_usermem_type(&entry->memdesc) == KGSL_MEM_ENTRY_ION) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07002500 kgsl_destroy_ion(entry->priv_data);
2501 entry->memdesc.sgt = NULL;
2502 }
2503
2504 kgsl_sharedmem_free(&entry->memdesc);
2505
2506out:
2507 kfree(entry);
2508 return ret;
2509}
2510
2511static long _map_usermem_addr(struct kgsl_device *device,
2512 struct kgsl_pagetable *pagetable, struct kgsl_mem_entry *entry,
2513 unsigned long hostptr, size_t offset, size_t size)
2514{
2515 if (!MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED))
2516 return -EINVAL;
2517
2518 /* No CPU mapped buffer could ever be secure */
2519 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
2520 return -EINVAL;
2521
2522 return kgsl_setup_useraddr(device, pagetable, entry, hostptr,
2523 offset, size);
2524}
2525
2526#ifdef CONFIG_DMA_SHARED_BUFFER
2527static int _map_usermem_dma_buf(struct kgsl_device *device,
2528 struct kgsl_pagetable *pagetable,
2529 struct kgsl_mem_entry *entry,
2530 unsigned int fd)
2531{
2532 int ret;
2533 struct dma_buf *dmabuf;
2534
2535 /*
2536	 * If content protection is not enabled and a secure buffer
2537	 * is requested to be mapped, return an error.
2538 */
2539
2540 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
2541 if (!kgsl_mmu_is_secured(&device->mmu)) {
2542 dev_WARN_ONCE(device->dev, 1,
2543 "Secure buffer not supported");
2544 return -EINVAL;
2545 }
2546
2547 entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
2548 }
2549
2550 dmabuf = dma_buf_get(fd);
2551 if (IS_ERR_OR_NULL(dmabuf)) {
2552 ret = PTR_ERR(dmabuf);
2553 return ret ? ret : -EINVAL;
2554 }
2555 ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
2556 if (ret)
2557 dma_buf_put(dmabuf);
2558 return ret;
2559}
2560#else
2561static int _map_usermem_dma_buf(struct kgsl_device *device,
2562 struct kgsl_pagetable *pagetable,
2563 struct kgsl_mem_entry *entry,
2564 unsigned int fd)
2565{
2566 return -EINVAL;
2567}
2568#endif
2569
2570#ifdef CONFIG_DMA_SHARED_BUFFER
2571static int kgsl_setup_dma_buf(struct kgsl_device *device,
2572 struct kgsl_pagetable *pagetable,
2573 struct kgsl_mem_entry *entry,
2574 struct dma_buf *dmabuf)
2575{
2576 int ret = 0;
2577 struct scatterlist *s;
2578 struct sg_table *sg_table;
2579 struct dma_buf_attachment *attach = NULL;
2580 struct kgsl_dma_buf_meta *meta;
2581
2582 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
2583 if (!meta)
2584 return -ENOMEM;
2585
2586 attach = dma_buf_attach(dmabuf, device->dev);
2587 if (IS_ERR_OR_NULL(attach)) {
2588 ret = attach ? PTR_ERR(attach) : -EINVAL;
2589 goto out;
2590 }
2591
2592 meta->dmabuf = dmabuf;
2593 meta->attach = attach;
2594
2595 attach->priv = entry;
2596
2597 entry->priv_data = meta;
2598 entry->memdesc.pagetable = pagetable;
2599 entry->memdesc.size = 0;
2600	/* USE_CPU_MAP is not implemented for ION. */
2601 entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
Lynus Vazeb7af682017-04-17 18:36:01 +05302602 entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ION;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002603
2604 sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
2605
2606 if (IS_ERR_OR_NULL(sg_table)) {
2607 ret = PTR_ERR(sg_table);
2608 goto out;
2609 }
2610
2611 meta->table = sg_table;
2612 entry->priv_data = meta;
2613 entry->memdesc.sgt = sg_table;
2614
2615 /* Calculate the size of the memdesc from the sglist */
2616 for (s = entry->memdesc.sgt->sgl; s != NULL; s = sg_next(s)) {
2617 int priv = (entry->memdesc.priv & KGSL_MEMDESC_SECURE) ? 1 : 0;
2618
2619 /*
2620		 * Check that each chunk of the sg table matches the secure
2621 * flag.
2622 */
2623
2624 if (PagePrivate(sg_page(s)) != priv) {
2625 ret = -EPERM;
2626 goto out;
2627 }
2628
2629 entry->memdesc.size += (uint64_t) s->length;
2630 }
2631
2632 entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size);
2633
2634out:
2635 if (ret) {
2636 if (!IS_ERR_OR_NULL(attach))
2637 dma_buf_detach(dmabuf, attach);
2638
2639
2640 kfree(meta);
2641 }
2642
2643 return ret;
2644}
2645#endif
2646
2647#ifdef CONFIG_DMA_SHARED_BUFFER
2648void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
2649 int *egl_surface_count, int *egl_image_count)
2650{
2651 struct kgsl_dma_buf_meta *meta = entry->priv_data;
2652 struct dma_buf *dmabuf = meta->dmabuf;
2653 struct dma_buf_attachment *mem_entry_buf_attachment = meta->attach;
2654 struct device *buf_attachment_dev = mem_entry_buf_attachment->dev;
2655 struct dma_buf_attachment *attachment = NULL;
2656
2657 mutex_lock(&dmabuf->lock);
2658 list_for_each_entry(attachment, &dmabuf->attachments, node) {
2659 struct kgsl_mem_entry *scan_mem_entry = NULL;
2660
2661 if (attachment->dev != buf_attachment_dev)
2662 continue;
2663
2664 scan_mem_entry = attachment->priv;
2665 if (!scan_mem_entry)
2666 continue;
2667
2668 switch (kgsl_memdesc_get_memtype(&scan_mem_entry->memdesc)) {
2669 case KGSL_MEMTYPE_EGL_SURFACE:
2670 (*egl_surface_count)++;
2671 break;
2672 case KGSL_MEMTYPE_EGL_IMAGE:
2673 (*egl_image_count)++;
2674 break;
2675 }
2676 }
2677 mutex_unlock(&dmabuf->lock);
2678}
2679#else
2680void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
2681 int *egl_surface_count, int *egl_image_count)
2682{
2683}
2684#endif
2685
2686long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
2687 unsigned int cmd, void *data)
2688{
2689 int result = -EINVAL;
2690 struct kgsl_map_user_mem *param = data;
2691 struct kgsl_mem_entry *entry = NULL;
2692 struct kgsl_process_private *private = dev_priv->process_priv;
2693 struct kgsl_mmu *mmu = &dev_priv->device->mmu;
2694 unsigned int memtype;
Lynus Vaz90d98b52018-04-09 14:45:36 +05302695 uint64_t flags;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002696
2697 /*
2698	 * If content protection is not enabled and a secure buffer
2699	 * is requested to be mapped, return an error.
2700 */
2701
2702 if (param->flags & KGSL_MEMFLAGS_SECURE) {
2703		/* Log message and return if content protection isn't enabled */
2704 if (!kgsl_mmu_is_secured(mmu)) {
2705 dev_WARN_ONCE(dev_priv->device->dev, 1,
2706 "Secure buffer not supported");
2707 return -EOPNOTSUPP;
2708 }
2709
2710 /* Can't use CPU map with secure buffers */
2711 if (param->flags & KGSL_MEMFLAGS_USE_CPU_MAP)
2712 return -EINVAL;
2713 }
2714
2715 entry = kgsl_mem_entry_create();
2716
2717 if (entry == NULL)
2718 return -ENOMEM;
2719
2720 /*
2721 * Convert from enum value to KGSL_MEM_ENTRY value, so that
2722 * we can use the latter consistently everywhere.
2723 */
2724 memtype = param->memtype + 1;
2725
2726 /*
2727 * Mask off unknown flags from userspace. This way the caller can
2728 * check if a flag is supported by looking at the returned flags.
2729 * Note: CACHEMODE is ignored for this call. Caching should be
2730 * determined by type of allocation being mapped.
2731 */
Lynus Vaz90d98b52018-04-09 14:45:36 +05302732 flags = param->flags & (KGSL_MEMFLAGS_GPUREADONLY
2733 | KGSL_MEMTYPE_MASK
2734 | KGSL_MEMALIGN_MASK
2735 | KGSL_MEMFLAGS_USE_CPU_MAP
2736 | KGSL_MEMFLAGS_SECURE
2737 | KGSL_MEMFLAGS_IOCOHERENT);
Deepak Kumarcf056d12018-04-17 15:59:42 +05302738
2739 if (kgsl_is_compat_task())
Lynus Vaz90d98b52018-04-09 14:45:36 +05302740 flags |= KGSL_MEMFLAGS_FORCE_32BIT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002741
Lynus Vaz90d98b52018-04-09 14:45:36 +05302742 kgsl_memdesc_init(dev_priv->device, &entry->memdesc, flags);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002743
2744 switch (memtype) {
2745 case KGSL_MEM_ENTRY_USER:
2746 result = _map_usermem_addr(dev_priv->device, private->pagetable,
2747 entry, param->hostptr, param->offset, param->len);
2748 break;
2749 case KGSL_MEM_ENTRY_ION:
2750 if (param->offset != 0)
2751 result = -EINVAL;
2752 else
2753 result = _map_usermem_dma_buf(dev_priv->device,
2754 private->pagetable, entry, param->fd);
2755 break;
2756 default:
2757 result = -EOPNOTSUPP;
2758 break;
2759 }
2760
2761 if (result)
2762 goto error;
2763
2764 if ((param->flags & KGSL_MEMFLAGS_SECURE) &&
2765 (entry->memdesc.size & mmu->secure_align_mask)) {
2766 result = -EINVAL;
2767 goto error_attach;
2768 }
2769
2770 if (entry->memdesc.size >= SZ_2M)
2771 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_2M));
2772 else if (entry->memdesc.size >= SZ_1M)
2773 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
2774 else if (entry->memdesc.size >= SZ_64K)
2775		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
2776
2777 /* echo back flags */
2778 param->flags = (unsigned int) entry->memdesc.flags;
2779
2780 result = kgsl_mem_entry_attach_process(dev_priv->device, private,
2781 entry);
2782 if (result)
2783 goto error_attach;
2784
2785 /* Adjust the returned value for a non 4k aligned offset */
2786 param->gpuaddr = (unsigned long)
2787 entry->memdesc.gpuaddr + (param->offset & PAGE_MASK);
2788
2789 KGSL_STATS_ADD(param->len, &kgsl_driver.stats.mapped,
2790 &kgsl_driver.stats.mapped_max);
2791
2792 kgsl_process_add_stats(private,
2793 kgsl_memdesc_usermem_type(&entry->memdesc), param->len);
2794
2795 trace_kgsl_mem_map(entry, param->fd);
2796
2797 kgsl_mem_entry_commit_process(entry);
Tarun Karra24d3fe12017-04-05 15:23:03 -07002798
2799 /* Put the extra ref from kgsl_mem_entry_create() */
2800 kgsl_mem_entry_put(entry);
2801
Shrenuj Bansala419c792016-10-20 14:05:11 -07002802 return result;
2803
2804error_attach:
Kamal Agrawal8f0fb822020-08-19 10:25:15 +05302805 switch (kgsl_memdesc_usermem_type(&entry->memdesc)) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07002806 case KGSL_MEM_ENTRY_ION:
2807 kgsl_destroy_ion(entry->priv_data);
2808 entry->memdesc.sgt = NULL;
2809 break;
2810 default:
2811 break;
2812 }
2813 kgsl_sharedmem_free(&entry->memdesc);
2814error:
2815 /* Clear gpuaddr here so userspace doesn't get any wrong ideas */
2816 param->gpuaddr = 0;
2817
2818 kfree(entry);
2819 return result;
2820}
2821
2822static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
2823 uint64_t offset, uint64_t length, unsigned int op)
2824{
2825 int ret = 0;
2826 int cacheop;
2827 int mode;
2828
Akhil P Oommen4323d4ca2017-06-21 12:54:18 +05302829 /* Cache ops are not allowed on secure memory */
2830 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
2831 return 0;
2832
Shrenuj Bansala419c792016-10-20 14:05:11 -07002833 /*
2834 * Flush is defined as (clean | invalidate). If both bits are set, then
2835 * do a flush, otherwise check for the individual bits and clean or inv
2836 * as requested
2837 */
2838
2839 if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
2840 cacheop = KGSL_CACHE_OP_FLUSH;
2841 else if (op & KGSL_GPUMEM_CACHE_CLEAN)
2842 cacheop = KGSL_CACHE_OP_CLEAN;
2843 else if (op & KGSL_GPUMEM_CACHE_INV)
2844 cacheop = KGSL_CACHE_OP_INV;
2845 else {
2846 ret = -EINVAL;
2847 goto done;
2848 }
2849
2850 if (!(op & KGSL_GPUMEM_CACHE_RANGE)) {
2851 offset = 0;
2852 length = entry->memdesc.size;
2853 }
2854
2855 mode = kgsl_memdesc_get_cachemode(&entry->memdesc);
2856 if (mode != KGSL_CACHEMODE_UNCACHED
2857 && mode != KGSL_CACHEMODE_WRITECOMBINE) {
2858 trace_kgsl_mem_sync_cache(entry, offset, length, op);
2859 ret = kgsl_cache_range_op(&entry->memdesc, offset,
2860 length, cacheop);
2861 }
2862
2863done:
2864 return ret;
2865}
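
/*
 * Example decode (editor's addition, flag names from the kgsl uapi):
 * since KGSL_GPUMEM_CACHE_FLUSH is the combination of CLEAN and INV,
 * the selection above works out to:
 *
 *	op = CLEAN | INV (== FLUSH)	-> KGSL_CACHE_OP_FLUSH
 *	op = CLEAN only			-> KGSL_CACHE_OP_CLEAN
 *	op = INV only			-> KGSL_CACHE_OP_INV
 *	no clean/inv bit at all		-> -EINVAL
 *
 * Unless KGSL_GPUMEM_CACHE_RANGE is set, the offset/length arguments are
 * ignored and the whole allocation is operated on.  Uncached and
 * write-combined entries are skipped.
 */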
2866
2867/* New cache sync function - supports both directions (clean and invalidate) */
2868
2869long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
2870 unsigned int cmd, void *data)
2871{
2872 struct kgsl_gpumem_sync_cache *param = data;
2873 struct kgsl_process_private *private = dev_priv->process_priv;
2874 struct kgsl_mem_entry *entry = NULL;
2875 long ret;
2876
2877 if (param->id != 0)
2878 entry = kgsl_sharedmem_find_id(private, param->id);
2879 else if (param->gpuaddr != 0)
2880 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
2881
2882 if (entry == NULL)
2883 return -EINVAL;
2884
2885 ret = _kgsl_gpumem_sync_cache(entry, (uint64_t) param->offset,
2886 (uint64_t) param->length, param->op);
2887 kgsl_mem_entry_put(entry);
2888 return ret;
2889}
2890
2891static int mem_id_cmp(const void *_a, const void *_b)
2892{
2893 const unsigned int *a = _a, *b = _b;
2894
2895 if (*a == *b)
2896 return 0;
2897 return (*a > *b) ? 1 : -1;
2898}
2899
2900#ifdef CONFIG_ARM64
2901/* Do not support full flush on ARM64 targets */
2902static inline bool check_full_flush(size_t size, int op)
2903{
2904 return false;
2905}
2906#else
2907/* Support full flush if the size is bigger than the threshold */
2908static inline bool check_full_flush(size_t size, int op)
2909{
2910 /* If we exceed the breakeven point, flush the entire cache */
2911 bool ret = (kgsl_driver.full_cache_threshold != 0) &&
2912 (size >= kgsl_driver.full_cache_threshold) &&
2913 (op == KGSL_GPUMEM_CACHE_FLUSH);
Maria Yuceafc602017-09-26 15:45:02 +08002914 if (ret)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002915 flush_cache_all();
Shrenuj Bansala419c792016-10-20 14:05:11 -07002916 return ret;
2917}
2918#endif
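
/*
 * Editor's illustration (the threshold value is an assumption; it is a
 * field of kgsl_driver that may be tuned elsewhere): on non-ARM64 builds,
 * once the accumulated size of a flush request crosses
 * kgsl_driver.full_cache_threshold, one flush_cache_all() replaces the
 * per-range maintenance.  With a 16MB threshold, for instance, a bulk
 * flush of 64 entries of 512KB each reaches break-even after 32 entries,
 * so the callers below flush the whole cache once and skip the ranged
 * operations.  On ARM64 this path is compiled out and entries are always
 * synced by range.
 */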
2919
2920long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
2921 unsigned int cmd, void *data)
2922{
2923 int i;
2924 struct kgsl_gpumem_sync_cache_bulk *param = data;
2925 struct kgsl_process_private *private = dev_priv->process_priv;
2926 unsigned int id, last_id = 0, *id_list = NULL, actual_count = 0;
2927 struct kgsl_mem_entry **entries = NULL;
2928 long ret = 0;
2929 uint64_t op_size = 0;
2930 bool full_flush = false;
2931
2932 if (param->id_list == NULL || param->count == 0
2933 || param->count > (PAGE_SIZE / sizeof(unsigned int)))
2934 return -EINVAL;
2935
2936 id_list = kcalloc(param->count, sizeof(unsigned int), GFP_KERNEL);
2937 if (id_list == NULL)
2938 return -ENOMEM;
2939
2940 entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
2941 if (entries == NULL) {
2942 ret = -ENOMEM;
2943 goto end;
2944 }
2945
2946 if (copy_from_user(id_list, param->id_list,
2947 param->count * sizeof(unsigned int))) {
2948 ret = -EFAULT;
2949 goto end;
2950 }
2951 /* sort the ids so we can weed out duplicates */
2952 sort(id_list, param->count, sizeof(*id_list), mem_id_cmp, NULL);
2953
2954 for (i = 0; i < param->count; i++) {
2955 unsigned int cachemode;
2956 struct kgsl_mem_entry *entry = NULL;
2957
2958 id = id_list[i];
2959 /* skip 0 ids or duplicates */
2960 if (id == last_id)
2961 continue;
2962
2963 entry = kgsl_sharedmem_find_id(private, id);
2964 if (entry == NULL)
2965 continue;
2966
2967 /* skip uncached memory */
2968 cachemode = kgsl_memdesc_get_cachemode(&entry->memdesc);
2969 if (cachemode != KGSL_CACHEMODE_WRITETHROUGH &&
2970 cachemode != KGSL_CACHEMODE_WRITEBACK) {
2971 kgsl_mem_entry_put(entry);
2972 continue;
2973 }
2974
2975 op_size += entry->memdesc.size;
2976 entries[actual_count++] = entry;
2977
2978 full_flush = check_full_flush(op_size, param->op);
Maria Yuceafc602017-09-26 15:45:02 +08002979 if (full_flush) {
2980 trace_kgsl_mem_sync_full_cache(actual_count, op_size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002981 break;
Maria Yuceafc602017-09-26 15:45:02 +08002982 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07002983
2984 last_id = id;
2985 }
2986
2987 param->op &= ~KGSL_GPUMEM_CACHE_RANGE;
2988
2989 for (i = 0; i < actual_count; i++) {
2990 if (!full_flush)
2991 _kgsl_gpumem_sync_cache(entries[i], 0,
2992 entries[i]->memdesc.size,
2993 param->op);
2994 kgsl_mem_entry_put(entries[i]);
2995 }
2996end:
2997 kfree(entries);
2998 kfree(id_list);
2999 return ret;
3000}
3001
3002/* Legacy cache function, does a flush (clean + invalidate) */
3003
3004long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
3005 unsigned int cmd, void *data)
3006{
3007 struct kgsl_sharedmem_free *param = data;
3008 struct kgsl_process_private *private = dev_priv->process_priv;
3009 struct kgsl_mem_entry *entry = NULL;
3010 long ret;
3011
3012 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
3013 if (entry == NULL)
3014 return -EINVAL;
3015
3016 ret = _kgsl_gpumem_sync_cache(entry, 0, entry->memdesc.size,
3017 KGSL_GPUMEM_CACHE_FLUSH);
3018 kgsl_mem_entry_put(entry);
3019 return ret;
3020}
3021
3022long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
3023 unsigned int cmd, void *data)
3024{
3025 struct kgsl_process_private *private = dev_priv->process_priv;
3026 struct kgsl_gpuobj_sync *param = data;
3027 struct kgsl_gpuobj_sync_obj *objs;
3028 struct kgsl_mem_entry **entries;
3029 long ret = 0;
3030 bool full_flush = false;
3031 uint64_t size = 0;
Carter Cooper69355b82018-01-17 09:49:00 -07003032 int i;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003033 void __user *ptr;
3034
3035 if (param->count == 0 || param->count > 128)
3036 return -EINVAL;
3037
3038 objs = kcalloc(param->count, sizeof(*objs), GFP_KERNEL);
3039 if (objs == NULL)
3040 return -ENOMEM;
3041
3042 entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
3043 if (entries == NULL) {
Carter Cooper69355b82018-01-17 09:49:00 -07003044 kfree(objs);
3045 return -ENOMEM;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003046 }
3047
3048 ptr = to_user_ptr(param->objs);
3049
3050 for (i = 0; i < param->count; i++) {
3051 ret = _copy_from_user(&objs[i], ptr, sizeof(*objs),
3052 param->obj_len);
3053 if (ret)
3054 goto out;
3055
3056 entries[i] = kgsl_sharedmem_find_id(private, objs[i].id);
3057
3058 /* Not finding the ID is not a fatal failure - just skip it */
3059 if (entries[i] == NULL)
3060 continue;
3061
Shrenuj Bansala419c792016-10-20 14:05:11 -07003062 if (!(objs[i].op & KGSL_GPUMEM_CACHE_RANGE))
3063 size += entries[i]->memdesc.size;
3064 else if (objs[i].offset < entries[i]->memdesc.size)
3065 size += (entries[i]->memdesc.size - objs[i].offset);
3066
3067 full_flush = check_full_flush(size, objs[i].op);
Maria Yuceafc602017-09-26 15:45:02 +08003068 if (full_flush) {
3069 trace_kgsl_mem_sync_full_cache(i, size);
Carter Cooper69355b82018-01-17 09:49:00 -07003070 goto out;
Maria Yuceafc602017-09-26 15:45:02 +08003071 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07003072
3073 ptr += sizeof(*objs);
3074 }
3075
Carter Cooper69355b82018-01-17 09:49:00 -07003076 for (i = 0; !ret && i < param->count; i++)
3077 if (entries[i])
3078 ret = _kgsl_gpumem_sync_cache(entries[i],
3079 objs[i].offset, objs[i].length,
3080 objs[i].op);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003081
Carter Cooper69355b82018-01-17 09:49:00 -07003082out:
Shrenuj Bansala419c792016-10-20 14:05:11 -07003083 for (i = 0; i < param->count; i++)
3084 if (entries[i])
3085 kgsl_mem_entry_put(entries[i]);
3086
Shrenuj Bansala419c792016-10-20 14:05:11 -07003087 kfree(entries);
3088 kfree(objs);
3089
3090 return ret;
3091}
3092
3093#ifdef CONFIG_ARM64
3094static uint64_t kgsl_filter_cachemode(uint64_t flags)
3095{
3096 /*
3097	 * WRITETHROUGH is not supported on arm64, so we tell the user that we
3098	 * use WRITEBACK, which is the default caching policy.
3099 */
3100 if ((flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT ==
3101 KGSL_CACHEMODE_WRITETHROUGH) {
3102 flags &= ~((uint64_t) KGSL_CACHEMODE_MASK);
Lynus Vazeb7af682017-04-17 18:36:01 +05303103 flags |= (uint64_t)((KGSL_CACHEMODE_WRITEBACK <<
3104 KGSL_CACHEMODE_SHIFT) &
3105 KGSL_CACHEMODE_MASK);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003106 }
3107 return flags;
3108}
3109#else
3110static uint64_t kgsl_filter_cachemode(uint64_t flags)
3111{
3112 return flags;
3113}
3114#endif
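
/*
 * Editor's example: on arm64 a user allocation requested with
 * KGSL_CACHEMODE_WRITETHROUGH is silently downgraded here to
 * KGSL_CACHEMODE_WRITEBACK, and because gpumem_alloc_entry() echoes
 * entry->memdesc.flags back through param->flags, the caller can detect
 * the substitution by re-reading the cache mode bits after the ioctl
 * returns.
 */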
3115
3116/* The largest allowable alignment for a GPU object is 32MB */
3117#define KGSL_MAX_ALIGN (32 * SZ_1M)
3118
Harshdeep Dhatt2e42f122017-05-31 17:27:19 -06003119struct kgsl_mem_entry *gpumem_alloc_entry(
Shrenuj Bansala419c792016-10-20 14:05:11 -07003120 struct kgsl_device_private *dev_priv,
3121 uint64_t size, uint64_t flags)
3122{
3123 int ret;
3124 struct kgsl_process_private *private = dev_priv->process_priv;
3125 struct kgsl_mem_entry *entry;
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07003126 struct kgsl_mmu *mmu = &dev_priv->device->mmu;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003127 unsigned int align;
3128
3129 flags &= KGSL_MEMFLAGS_GPUREADONLY
3130 | KGSL_CACHEMODE_MASK
3131 | KGSL_MEMTYPE_MASK
3132 | KGSL_MEMALIGN_MASK
3133 | KGSL_MEMFLAGS_USE_CPU_MAP
3134 | KGSL_MEMFLAGS_SECURE
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07003135 | KGSL_MEMFLAGS_FORCE_32BIT
3136 | KGSL_MEMFLAGS_IOCOHERENT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003137
Shrenuj Bansala419c792016-10-20 14:05:11 -07003138 /* Return not supported error if secure memory isn't enabled */
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07003139 if (!kgsl_mmu_is_secured(mmu) &&
Shrenuj Bansala419c792016-10-20 14:05:11 -07003140 (flags & KGSL_MEMFLAGS_SECURE)) {
3141 dev_WARN_ONCE(dev_priv->device->dev, 1,
3142 "Secure memory not supported");
3143 return ERR_PTR(-EOPNOTSUPP);
3144 }
3145
Shrenuj Bansala419c792016-10-20 14:05:11 -07003146 /* Cap the alignment bits to the highest number we can handle */
3147 align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
3148 if (align >= ilog2(KGSL_MAX_ALIGN)) {
3149 KGSL_CORE_ERR("Alignment too large; restricting to %dK\n",
3150 KGSL_MAX_ALIGN >> 10);
3151
3152 flags &= ~((uint64_t) KGSL_MEMALIGN_MASK);
Lynus Vazeb7af682017-04-17 18:36:01 +05303153 flags |= (uint64_t)((ilog2(KGSL_MAX_ALIGN) <<
3154 KGSL_MEMALIGN_SHIFT) &
3155 KGSL_MEMALIGN_MASK);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003156 }
3157
3158 /* For now only allow allocations up to 4G */
3159 if (size == 0 || size > UINT_MAX)
3160 return ERR_PTR(-EINVAL);
3161
3162 flags = kgsl_filter_cachemode(flags);
3163
3164 entry = kgsl_mem_entry_create();
3165 if (entry == NULL)
3166 return ERR_PTR(-ENOMEM);
3167
Shrenuj Bansala419c792016-10-20 14:05:11 -07003168 ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
3169 size, flags);
3170 if (ret != 0)
3171 goto err;
3172
3173 ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
3174 if (ret != 0) {
3175 kgsl_sharedmem_free(&entry->memdesc);
3176 goto err;
3177 }
3178
3179 kgsl_process_add_stats(private,
3180 kgsl_memdesc_usermem_type(&entry->memdesc),
3181 entry->memdesc.size);
3182 trace_kgsl_mem_alloc(entry);
3183
3184 kgsl_mem_entry_commit_process(entry);
3185 return entry;
3186err:
3187 kfree(entry);
3188 return ERR_PTR(ret);
3189}
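
/*
 * Worked example (editor's addition, hypothetical request): asking for a
 * 1MB buffer with an alignment field of ilog2(64MB) = 26 trips the
 * KGSL_MAX_ALIGN cap above, logs "Alignment too large; restricting to
 * 32768K" and rewrites the align bits to ilog2(32MB) = 25 before the
 * allocation proceeds.  Requests of size 0 or larger than UINT_MAX are
 * rejected outright with -EINVAL.
 */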
3190
3191static void copy_metadata(struct kgsl_mem_entry *entry, uint64_t metadata,
3192 unsigned int len)
3193{
3194 unsigned int i, size;
3195
3196 if (len == 0)
3197 return;
3198
3199 size = min_t(unsigned int, len, sizeof(entry->metadata) - 1);
3200
3201 if (copy_from_user(entry->metadata, to_user_ptr(metadata), size)) {
3202 memset(entry->metadata, 0, sizeof(entry->metadata));
3203 return;
3204 }
3205
3206 /* Clean up non printable characters in the string */
3207 for (i = 0; i < size && entry->metadata[i] != 0; i++) {
3208 if (!isprint(entry->metadata[i]))
3209 entry->metadata[i] = '?';
3210 }
3211}
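
/*
 * Example (editor's addition, hypothetical input): a metadata string such
 * as "texture\x01atlas" copied from userspace is stored with the
 * non-printable byte replaced, i.e. "texture?atlas", and anything longer
 * than sizeof(entry->metadata) - 1 bytes is silently truncated.  A failed
 * copy_from_user() clears the buffer instead of failing the allocation.
 */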
3212
3213long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
3214 unsigned int cmd, void *data)
3215{
3216 struct kgsl_gpuobj_alloc *param = data;
3217 struct kgsl_mem_entry *entry;
3218
Deepak Kumarcf056d12018-04-17 15:59:42 +05303219 if (kgsl_is_compat_task())
3220 param->flags |= KGSL_MEMFLAGS_FORCE_32BIT;
3221
Shrenuj Bansala419c792016-10-20 14:05:11 -07003222 entry = gpumem_alloc_entry(dev_priv, param->size, param->flags);
3223
3224 if (IS_ERR(entry))
3225 return PTR_ERR(entry);
3226
3227 copy_metadata(entry, param->metadata, param->metadata_len);
3228
3229 param->size = entry->memdesc.size;
3230 param->flags = entry->memdesc.flags;
3231 param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc);
3232 param->id = entry->id;
3233
Tarun Karra24d3fe12017-04-05 15:23:03 -07003234 /* Put the extra ref from kgsl_mem_entry_create() */
3235 kgsl_mem_entry_put(entry);
3236
Shrenuj Bansala419c792016-10-20 14:05:11 -07003237 return 0;
3238}
3239
3240long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
3241 unsigned int cmd, void *data)
3242{
3243 struct kgsl_gpumem_alloc *param = data;
3244 struct kgsl_mem_entry *entry;
3245 uint64_t flags = param->flags;
3246
3247	/* Legacy functions don't support these advanced features */
3248 flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
Deepak Kumarcf056d12018-04-17 15:59:42 +05303249
3250 if (kgsl_is_compat_task())
3251 flags |= KGSL_MEMFLAGS_FORCE_32BIT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003252
3253 entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);
3254
3255 if (IS_ERR(entry))
3256 return PTR_ERR(entry);
3257
3258 param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
3259 param->size = (size_t) entry->memdesc.size;
3260 param->flags = (unsigned int) entry->memdesc.flags;
3261
Tarun Karra24d3fe12017-04-05 15:23:03 -07003262 /* Put the extra ref from kgsl_mem_entry_create() */
3263 kgsl_mem_entry_put(entry);
3264
Shrenuj Bansala419c792016-10-20 14:05:11 -07003265 return 0;
3266}
3267
3268long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
3269 unsigned int cmd, void *data)
3270{
3271 struct kgsl_gpumem_alloc_id *param = data;
3272 struct kgsl_mem_entry *entry;
3273 uint64_t flags = param->flags;
3274
Deepak Kumarcf056d12018-04-17 15:59:42 +05303275 if (kgsl_is_compat_task())
3276 flags |= KGSL_MEMFLAGS_FORCE_32BIT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003277
3278 entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);
3279
3280 if (IS_ERR(entry))
3281 return PTR_ERR(entry);
3282
3283 param->id = entry->id;
3284 param->flags = (unsigned int) entry->memdesc.flags;
3285 param->size = (size_t) entry->memdesc.size;
3286 param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
3287 param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
3288
Tarun Karra24d3fe12017-04-05 15:23:03 -07003289 /* Put the extra ref from kgsl_mem_entry_create() */
3290 kgsl_mem_entry_put(entry);
3291
Shrenuj Bansala419c792016-10-20 14:05:11 -07003292 return 0;
3293}
3294
3295long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
3296 unsigned int cmd, void *data)
3297{
3298 struct kgsl_process_private *private = dev_priv->process_priv;
3299 struct kgsl_gpumem_get_info *param = data;
3300 struct kgsl_mem_entry *entry = NULL;
3301 int result = 0;
3302
3303 if (param->id != 0)
3304 entry = kgsl_sharedmem_find_id(private, param->id);
3305 else if (param->gpuaddr != 0)
3306 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
3307
3308 if (entry == NULL)
3309 return -EINVAL;
3310
3311 /*
3312 * If any of the 64 bit address / sizes would end up being
3313 * truncated, return -ERANGE. That will signal the user that they
3314 * should use a more modern API
3315 */
3316 if (entry->memdesc.gpuaddr > ULONG_MAX)
3317 result = -ERANGE;
3318
3319 param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
3320 param->id = entry->id;
3321 param->flags = (unsigned int) entry->memdesc.flags;
3322 param->size = (size_t) entry->memdesc.size;
3323 param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
Jordan Crouse6bce65c2020-12-28 16:06:42 +05303324 /*
3325	 * Entries can have multiple user mappings so there isn't any one address
3326 * we can report. Plus, the user should already know their mappings, so
3327 * there isn't any value in reporting it back to them.
3328 */
3329 param->useraddr = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003330
3331 kgsl_mem_entry_put(entry);
3332 return result;
3333}
3334
3335static inline int _sparse_alloc_param_sanity_check(uint64_t size,
3336 uint64_t pagesize)
3337{
3338 if (size == 0 || pagesize == 0)
3339 return -EINVAL;
3340
3341 if (pagesize != PAGE_SIZE && pagesize != SZ_64K)
3342 return -EINVAL;
3343
3344 if (pagesize > size || !IS_ALIGNED(size, pagesize))
3345 return -EINVAL;
3346
3347 return 0;
3348}
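
/*
 * Editor's examples of the checks above (hypothetical values, PAGE_SIZE
 * assumed to be 4KB):
 *
 *	size = 256KB, pagesize = 64KB	-> OK
 *	size = 256KB, pagesize = 4KB	-> OK
 *	size = 256KB, pagesize = 8KB	-> -EINVAL (not 4KB or 64KB)
 *	size = 100KB, pagesize = 64KB	-> -EINVAL (size not pagesize aligned)
 *	size = 32KB,  pagesize = 64KB	-> -EINVAL (pagesize > size)
 */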
3349
3350long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
3351 unsigned int cmd, void *data)
3352{
3353 struct kgsl_process_private *process = dev_priv->process_priv;
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003354 struct kgsl_device *device = dev_priv->device;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003355 struct kgsl_sparse_phys_alloc *param = data;
3356 struct kgsl_mem_entry *entry;
Lynus Vaz90d98b52018-04-09 14:45:36 +05303357 uint64_t flags;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003358 int ret;
3359 int id;
3360
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003361 if (!(device->flags & KGSL_FLAG_SPARSE))
3362 return -ENOTSUPP;
3363
Shrenuj Bansala419c792016-10-20 14:05:11 -07003364 ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
3365 if (ret)
3366 return ret;
3367
3368 entry = kgsl_mem_entry_create();
3369 if (entry == NULL)
3370 return -ENOMEM;
3371
3372 ret = kgsl_process_private_get(process);
3373 if (!ret) {
3374 ret = -EBADF;
3375 goto err_free_entry;
3376 }
3377
3378 idr_preload(GFP_KERNEL);
3379 spin_lock(&process->mem_lock);
3380 /* Allocate the ID but don't attach the pointer just yet */
3381 id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
3382 spin_unlock(&process->mem_lock);
3383 idr_preload_end();
3384
3385 if (id < 0) {
3386 ret = id;
3387 goto err_put_proc_priv;
3388 }
3389
3390 entry->id = id;
3391 entry->priv = process;
3392
Lynus Vaz90d98b52018-04-09 14:45:36 +05303393 flags = KGSL_MEMFLAGS_SPARSE_PHYS |
3394 ((ilog2(param->pagesize) << KGSL_MEMALIGN_SHIFT) &
3395 KGSL_MEMALIGN_MASK);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003396
3397 ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
Lynus Vaz90d98b52018-04-09 14:45:36 +05303398 param->size, flags);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003399 if (ret)
3400 goto err_remove_idr;
3401
3402 /* Sanity check to verify we got correct pagesize */
3403 if (param->pagesize != PAGE_SIZE && entry->memdesc.sgt != NULL) {
3404 struct scatterlist *s;
3405 int i;
3406
3407 for_each_sg(entry->memdesc.sgt->sgl, s,
3408 entry->memdesc.sgt->nents, i) {
3409 if (!IS_ALIGNED(s->length, param->pagesize))
3410 goto err_invalid_pages;
3411 }
3412 }
3413
3414 param->id = entry->id;
3415 param->flags = entry->memdesc.flags;
3416
3417 trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
3418 kgsl_mem_entry_commit_process(entry);
3419
Tarun Karra24d3fe12017-04-05 15:23:03 -07003420 /* Put the extra ref from kgsl_mem_entry_create() */
3421 kgsl_mem_entry_put(entry);
3422
Shrenuj Bansala419c792016-10-20 14:05:11 -07003423 return 0;
3424
3425err_invalid_pages:
3426 kgsl_sharedmem_free(&entry->memdesc);
3427err_remove_idr:
3428 spin_lock(&process->mem_lock);
3429 idr_remove(&process->mem_idr, entry->id);
3430 spin_unlock(&process->mem_lock);
3431err_put_proc_priv:
3432 kgsl_process_private_put(process);
3433err_free_entry:
3434 kfree(entry);
3435
3436 return ret;
3437}
3438
3439long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
3440 unsigned int cmd, void *data)
3441{
3442 struct kgsl_process_private *process = dev_priv->process_priv;
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003443 struct kgsl_device *device = dev_priv->device;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003444 struct kgsl_sparse_phys_free *param = data;
3445 struct kgsl_mem_entry *entry;
3446
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003447 if (!(device->flags & KGSL_FLAG_SPARSE))
3448 return -ENOTSUPP;
3449
Shrenuj Bansala419c792016-10-20 14:05:11 -07003450 entry = kgsl_sharedmem_find_id_flags(process, param->id,
3451 KGSL_MEMFLAGS_SPARSE_PHYS);
3452 if (entry == NULL)
3453 return -EINVAL;
3454
Deepak Kumar32814682018-02-16 11:46:26 +05303455 if (!kgsl_mem_entry_set_pend(entry)) {
3456 kgsl_mem_entry_put(entry);
3457 return -EBUSY;
3458 }
3459
Shrenuj Bansala419c792016-10-20 14:05:11 -07003460 if (entry->memdesc.cur_bindings != 0) {
Deepak Kumar32814682018-02-16 11:46:26 +05303461 kgsl_mem_entry_unset_pend(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003462 kgsl_mem_entry_put(entry);
3463 return -EINVAL;
3464 }
3465
3466 trace_sparse_phys_free(entry->id);
3467
3468 /* One put for find_id(), one put for the kgsl_mem_entry_create() */
3469 kgsl_mem_entry_put(entry);
Hareesh Gundu615439d2017-06-16 17:06:57 +05303470 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003471
3472 return 0;
3473}
3474
3475long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
3476 unsigned int cmd, void *data)
3477{
3478 struct kgsl_process_private *private = dev_priv->process_priv;
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003479 struct kgsl_device *device = dev_priv->device;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003480 struct kgsl_sparse_virt_alloc *param = data;
3481 struct kgsl_mem_entry *entry;
3482 int ret;
3483
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003484 if (!(device->flags & KGSL_FLAG_SPARSE))
3485 return -ENOTSUPP;
3486
Shrenuj Bansala419c792016-10-20 14:05:11 -07003487 ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
3488 if (ret)
3489 return ret;
3490
3491 entry = kgsl_mem_entry_create();
3492 if (entry == NULL)
3493 return -ENOMEM;
3494
Lynus Vaz90d98b52018-04-09 14:45:36 +05303495 kgsl_memdesc_init(dev_priv->device, &entry->memdesc,
3496 KGSL_MEMFLAGS_SPARSE_VIRT);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003497 entry->memdesc.size = param->size;
3498 entry->memdesc.cur_bindings = 0;
3499 kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
3500
3501 spin_lock_init(&entry->bind_lock);
3502 entry->bind_tree = RB_ROOT;
3503
3504 ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
3505 if (ret) {
3506 kfree(entry);
3507 return ret;
3508 }
3509
3510 param->id = entry->id;
3511 param->gpuaddr = entry->memdesc.gpuaddr;
3512 param->flags = entry->memdesc.flags;
3513
3514 trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
3515 kgsl_mem_entry_commit_process(entry);
3516
Tarun Karra24d3fe12017-04-05 15:23:03 -07003517 /* Put the extra ref from kgsl_mem_entry_create() */
3518 kgsl_mem_entry_put(entry);
3519
Shrenuj Bansala419c792016-10-20 14:05:11 -07003520 return 0;
3521}
3522
3523long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
3524 unsigned int cmd, void *data)
3525{
3526 struct kgsl_process_private *process = dev_priv->process_priv;
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003527 struct kgsl_device *device = dev_priv->device;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003528 struct kgsl_sparse_virt_free *param = data;
3529 struct kgsl_mem_entry *entry = NULL;
3530
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003531 if (!(device->flags & KGSL_FLAG_SPARSE))
3532 return -ENOTSUPP;
3533
Shrenuj Bansala419c792016-10-20 14:05:11 -07003534 entry = kgsl_sharedmem_find_id_flags(process, param->id,
3535 KGSL_MEMFLAGS_SPARSE_VIRT);
3536 if (entry == NULL)
3537 return -EINVAL;
3538
Deepak Kumar32814682018-02-16 11:46:26 +05303539 if (!kgsl_mem_entry_set_pend(entry)) {
3540 kgsl_mem_entry_put(entry);
3541 return -EBUSY;
3542 }
3543
Shrenuj Bansala419c792016-10-20 14:05:11 -07003544 if (entry->bind_tree.rb_node != NULL) {
Deepak Kumar32814682018-02-16 11:46:26 +05303545 kgsl_mem_entry_unset_pend(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003546 kgsl_mem_entry_put(entry);
3547 return -EINVAL;
3548 }
3549
3550 trace_sparse_virt_free(entry->id);
3551
3552 /* One put for find_id(), one put for the kgsl_mem_entry_create() */
3553 kgsl_mem_entry_put(entry);
Hareesh Gundu615439d2017-06-16 17:06:57 +05303554 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003555
3556 return 0;
3557}
3558
Lynus Vaz4930cb12017-09-08 18:32:53 +05303559/* entry->bind_lock must be held by the caller */
Shrenuj Bansala419c792016-10-20 14:05:11 -07003560static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
3561 uint64_t v_offset,
3562 struct kgsl_memdesc *memdesc,
3563 uint64_t p_offset,
3564 uint64_t size,
3565 uint64_t flags)
3566{
3567 struct sparse_bind_object *new;
3568 struct rb_node **node, *parent = NULL;
3569
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303570 new = kzalloc(sizeof(*new), GFP_ATOMIC);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003571 if (new == NULL)
3572 return -ENOMEM;
3573
3574 new->v_off = v_offset;
3575 new->p_off = p_offset;
3576 new->p_memdesc = memdesc;
3577 new->size = size;
3578 new->flags = flags;
3579
3580 node = &entry->bind_tree.rb_node;
3581
3582 while (*node != NULL) {
3583 struct sparse_bind_object *this;
3584
3585 parent = *node;
3586 this = rb_entry(parent, struct sparse_bind_object, node);
3587
Lynus Vaze8c82572017-09-08 17:27:56 +05303588 if ((new->v_off < this->v_off) &&
3589 ((new->v_off + new->size) <= this->v_off))
Shrenuj Bansala419c792016-10-20 14:05:11 -07003590 node = &parent->rb_left;
Lynus Vaze8c82572017-09-08 17:27:56 +05303591 else if ((new->v_off > this->v_off) &&
3592 (new->v_off >= (this->v_off + this->size)))
Shrenuj Bansala419c792016-10-20 14:05:11 -07003593 node = &parent->rb_right;
Lynus Vaze8c82572017-09-08 17:27:56 +05303594 else {
3595 kfree(new);
3596 return -EADDRINUSE;
3597 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07003598 }
3599
3600 rb_link_node(&new->node, parent, node);
3601 rb_insert_color(&new->node, &entry->bind_tree);
3602
3603 return 0;
3604}
3605
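/*
 * Worked example of the insertion rule above (illustrative only): with one
 * bind already recorded at v_off 0x10000, size 0x10000, a new entry for
 * [0x00000, 0x10000) descends left and one for [0x20000, 0x30000) descends
 * right, because each ends at or before (or starts at or after) the existing
 * range. A new entry for [0x18000, 0x28000) satisfies neither comparison, so
 * the walk stops and -EADDRINUSE is returned; overlapping binds inside one
 * virtual allocation are therefore rejected here.
 */
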
3606static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
3607 struct sparse_bind_object *obj,
3608 uint64_t v_offset, uint64_t size)
3609{
Shrenuj Bansala419c792016-10-20 14:05:11 -07003610 if (v_offset == obj->v_off && size >= obj->size) {
3611 /*
3612		 * The unbind range covers the entire object; remove the
3613		 * node from the tree and free it
3614 */
3615 rb_erase(&obj->node, &entry->bind_tree);
3616 kfree(obj);
3617 } else if (v_offset == obj->v_off) {
3618 /*
3619		 * The unbind starts at the front of the object, so advance
3620		 * the object's front
3621 */
3622 obj->v_off += size;
3623 obj->p_off += size;
3624 obj->size -= size;
3625 } else if ((v_offset + size) == (obj->v_off + obj->size)) {
3626 /*
3627		 * The unbind ends at the end of the object, so shrink the
3628		 * object from the back
3629 */
3630 obj->size -= size;
3631 } else {
3632 /*
3633		 * The unbind falls in the middle of the object: shrink this
3634		 * node to cover the portion in front of the hole and add a
3635		 * new node to the tree for the portion after it.
3636 */
3637 uint64_t tmp_size = obj->size;
3638 int ret;
3639
3640 obj->size = v_offset - obj->v_off;
3641
Shrenuj Bansala419c792016-10-20 14:05:11 -07003642 ret = _sparse_add_to_bind_tree(entry, v_offset + size,
3643 obj->p_memdesc,
3644 obj->p_off + (v_offset - obj->v_off) + size,
3645 tmp_size - (v_offset - obj->v_off) - size,
3646 obj->flags);
3647
3648 return ret;
3649 }
3650
Shrenuj Bansala419c792016-10-20 14:05:11 -07003651 return 0;
3652}
3653
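/*
 * Worked example of the four cases above (illustrative only), for a bind
 * object with v_off 0x40000 and size 0x40000 (covering [0x40000, 0x80000)):
 *
 *	unbind 0x40000/0x40000 - removes the node entirely
 *	unbind 0x40000/0x10000 - trims the front: v_off and p_off advance by
 *				 0x10000 and size drops to 0x30000
 *	unbind 0x70000/0x10000 - trims the back: size drops to 0x30000
 *	unbind 0x50000/0x10000 - punches a hole: this node shrinks to
 *				 [0x40000, 0x50000) and a new node is inserted
 *				 for [0x60000, 0x80000) with p_off advanced by
 *				 the same 0x20000 the virtual side skipped
 */
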
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303654/* entry->bind_lock must be held by the caller */
Shrenuj Bansala419c792016-10-20 14:05:11 -07003655static struct sparse_bind_object *_find_containing_bind_obj(
3656 struct kgsl_mem_entry *entry,
3657 uint64_t offset, uint64_t size)
3658{
3659 struct sparse_bind_object *obj = NULL;
3660 struct rb_node *node = entry->bind_tree.rb_node;
3661
Shrenuj Bansala419c792016-10-20 14:05:11 -07003662 while (node != NULL) {
3663 obj = rb_entry(node, struct sparse_bind_object, node);
3664
3665 if (offset == obj->v_off) {
3666 break;
3667 } else if (offset < obj->v_off) {
3668 if (offset + size > obj->v_off)
3669 break;
3670 node = node->rb_left;
3671 obj = NULL;
3672 } else if (offset > obj->v_off) {
3673 if (offset < obj->v_off + obj->size)
3674 break;
3675 node = node->rb_right;
3676 obj = NULL;
3677 }
3678 }
3679
Shrenuj Bansala419c792016-10-20 14:05:11 -07003680 return obj;
3681}
3682
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303683/* entry->bind_lock must be held by the caller */
Shrenuj Bansala419c792016-10-20 14:05:11 -07003684static int _sparse_unbind(struct kgsl_mem_entry *entry,
3685 struct sparse_bind_object *bind_obj,
3686 uint64_t offset, uint64_t size)
3687{
Shrenuj Bansala419c792016-10-20 14:05:11 -07003688 int ret;
3689
Shrenuj Bansala419c792016-10-20 14:05:11 -07003690 ret = _sparse_rm_from_bind_tree(entry, bind_obj, offset, size);
3691 if (ret == 0) {
3692 atomic_long_sub(size, &kgsl_driver.stats.mapped);
3693 trace_sparse_unbind(entry->id, offset, size);
3694 }
3695
3696 return ret;
3697}
3698
3699static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
3700 struct kgsl_mem_entry *virt_entry)
3701{
3702 struct sparse_bind_object *bind_obj;
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303703 struct kgsl_memdesc *memdesc;
3704 struct kgsl_pagetable *pt;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003705 int ret = 0;
3706 uint64_t size = obj->size;
3707 uint64_t tmp_size = obj->size;
3708 uint64_t offset = obj->virtoffset;
3709
3710 while (size > 0 && ret == 0) {
3711 tmp_size = size;
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303712
3713 spin_lock(&virt_entry->bind_lock);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003714 bind_obj = _find_containing_bind_obj(virt_entry, offset, size);
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303715
3716 if (bind_obj == NULL) {
3717 spin_unlock(&virt_entry->bind_lock);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003718 return 0;
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303719 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07003720
3721 if (bind_obj->v_off > offset) {
3722			tmp_size = size - (bind_obj->v_off - offset);
3723 if (tmp_size > bind_obj->size)
3724 tmp_size = bind_obj->size;
3725 offset = bind_obj->v_off;
3726 } else if (bind_obj->v_off < offset) {
3727 uint64_t diff = offset - bind_obj->v_off;
3728
3729 if (diff + size > bind_obj->size)
3730 tmp_size = bind_obj->size - diff;
3731 } else {
3732 if (tmp_size > bind_obj->size)
3733 tmp_size = bind_obj->size;
3734 }
3735
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303736 memdesc = bind_obj->p_memdesc;
3737 pt = memdesc->pagetable;
3738
3739 if (memdesc->cur_bindings < (tmp_size / PAGE_SIZE)) {
3740 spin_unlock(&virt_entry->bind_lock);
3741 return -EINVAL;
3742 }
3743
3744 memdesc->cur_bindings -= tmp_size / PAGE_SIZE;
3745
Shrenuj Bansala419c792016-10-20 14:05:11 -07003746 ret = _sparse_unbind(virt_entry, bind_obj, offset, tmp_size);
Sunil Khatri94dce8f2017-09-04 14:05:03 +05303747 spin_unlock(&virt_entry->bind_lock);
3748
3749 ret = kgsl_mmu_unmap_offset(pt, memdesc,
3750 virt_entry->memdesc.gpuaddr, offset, tmp_size);
3751 if (ret)
3752 return ret;
3753
3754 ret = kgsl_mmu_sparse_dummy_map(pt, memdesc, offset, tmp_size);
3755 if (ret)
3756 return ret;
3757
Shrenuj Bansala419c792016-10-20 14:05:11 -07003758 if (ret == 0) {
3759 offset += tmp_size;
3760 size -= tmp_size;
3761 }
3762 }
3763
3764 return ret;
3765}
3766
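/*
 * Illustrative walk-through of the loop above: unbinding a range that spans
 * several bind objects takes one pass per object. Each pass looks up the
 * object containing (or next above) the current offset under bind_lock,
 * clamps the chunk to that object, drops it from the bind tree, unmaps it
 * and re-installs the dummy mapping, then advances offset and shrinks size
 * by the chunk just processed. The loop ends when the request is exhausted
 * or no containing object remains, in which case 0 is returned.
 */
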
3767static inline bool _is_phys_bindable(struct kgsl_mem_entry *phys_entry,
3768 uint64_t offset, uint64_t size, uint64_t flags)
3769{
3770 struct kgsl_memdesc *memdesc = &phys_entry->memdesc;
3771
3772 if (!IS_ALIGNED(offset | size, kgsl_memdesc_get_pagesize(memdesc)))
3773 return false;
3774
Sudeep Yedalapure8ff97992017-01-20 20:12:51 +05303775 if (offset + size < offset)
3776 return false;
3777
Shrenuj Bansala419c792016-10-20 14:05:11 -07003778 if (!(flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
3779 offset + size > memdesc->size)
3780 return false;
3781
3782 return true;
3783}
3784
3785static int _sparse_bind(struct kgsl_process_private *process,
3786 struct kgsl_mem_entry *virt_entry, uint64_t v_offset,
3787 struct kgsl_mem_entry *phys_entry, uint64_t p_offset,
3788 uint64_t size, uint64_t flags)
3789{
3790 int ret;
3791 struct kgsl_pagetable *pagetable;
3792 struct kgsl_memdesc *memdesc = &phys_entry->memdesc;
3793
3794	/* The physical entry must not already have a GPU address assigned */
3795 if (memdesc->gpuaddr)
3796 return -EINVAL;
3797
Shrenuj Bansala419c792016-10-20 14:05:11 -07003798 pagetable = memdesc->pagetable;
3799
3800 /* Clear out any mappings */
3801 ret = kgsl_mmu_unmap_offset(pagetable, &virt_entry->memdesc,
3802 virt_entry->memdesc.gpuaddr, v_offset, size);
3803 if (ret)
3804 return ret;
3805
3806 ret = kgsl_mmu_map_offset(pagetable, virt_entry->memdesc.gpuaddr,
3807 v_offset, memdesc, p_offset, size, flags);
3808 if (ret) {
3809 /* Try to clean up, but not the end of the world */
3810 kgsl_mmu_sparse_dummy_map(pagetable, &virt_entry->memdesc,
3811 v_offset, size);
3812 return ret;
3813 }
3814
Lynus Vaz4930cb12017-09-08 18:32:53 +05303815 spin_lock(&virt_entry->bind_lock);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003816 ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc,
3817 p_offset, size, flags);
Lynus Vaz4930cb12017-09-08 18:32:53 +05303818 spin_unlock(&virt_entry->bind_lock);
3819
Shrenuj Bansala419c792016-10-20 14:05:11 -07003820 if (ret == 0)
3821 memdesc->cur_bindings += size / PAGE_SIZE;
3822
3823 return ret;
3824}
3825
3826static long sparse_bind_range(struct kgsl_process_private *private,
3827 struct kgsl_sparse_binding_object *obj,
3828 struct kgsl_mem_entry *virt_entry)
3829{
3830 struct kgsl_mem_entry *phys_entry;
3831 int ret;
3832
3833 phys_entry = kgsl_sharedmem_find_id_flags(private, obj->id,
3834 KGSL_MEMFLAGS_SPARSE_PHYS);
3835 if (phys_entry == NULL)
3836 return -EINVAL;
3837
3838 if (!_is_phys_bindable(phys_entry, obj->physoffset, obj->size,
3839 obj->flags)) {
3840 kgsl_mem_entry_put(phys_entry);
3841 return -EINVAL;
3842 }
3843
3844 if (kgsl_memdesc_get_align(&virt_entry->memdesc) !=
3845 kgsl_memdesc_get_align(&phys_entry->memdesc)) {
3846 kgsl_mem_entry_put(phys_entry);
3847 return -EINVAL;
3848 }
3849
3850 ret = sparse_unbind_range(obj, virt_entry);
3851 if (ret) {
3852 kgsl_mem_entry_put(phys_entry);
3853 return -EINVAL;
3854 }
3855
3856 ret = _sparse_bind(private, virt_entry, obj->virtoffset,
3857 phys_entry, obj->physoffset, obj->size,
3858 obj->flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS);
3859 if (ret == 0) {
3860 KGSL_STATS_ADD(obj->size, &kgsl_driver.stats.mapped,
3861 &kgsl_driver.stats.mapped_max);
3862
3863 trace_sparse_bind(virt_entry->id, obj->virtoffset,
3864 phys_entry->id, obj->physoffset,
3865 obj->size, obj->flags);
3866 }
3867
3868 kgsl_mem_entry_put(phys_entry);
3869
3870 return ret;
3871}
3872
3873long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
3874 unsigned int cmd, void *data)
3875{
3876 struct kgsl_process_private *private = dev_priv->process_priv;
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003877 struct kgsl_device *device = dev_priv->device;
Shrenuj Bansala419c792016-10-20 14:05:11 -07003878 struct kgsl_sparse_bind *param = data;
3879 struct kgsl_sparse_binding_object obj;
3880 struct kgsl_mem_entry *virt_entry;
3881 int pg_sz;
3882 void __user *ptr;
3883 int ret = 0;
3884 int i = 0;
3885
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003886 if (!(device->flags & KGSL_FLAG_SPARSE))
3887 return -ENOTSUPP;
3888
Shrenuj Bansala419c792016-10-20 14:05:11 -07003889 ptr = (void __user *) (uintptr_t) param->list;
3890
3891 if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
3892 param->count == 0 || ptr == NULL)
3893 return -EINVAL;
3894
3895 virt_entry = kgsl_sharedmem_find_id_flags(private, param->id,
3896 KGSL_MEMFLAGS_SPARSE_VIRT);
3897 if (virt_entry == NULL)
3898 return -EINVAL;
3899
3900 pg_sz = kgsl_memdesc_get_pagesize(&virt_entry->memdesc);
3901
3902 for (i = 0; i < param->count; i++) {
3903 memset(&obj, 0, sizeof(obj));
3904 ret = _copy_from_user(&obj, ptr, sizeof(obj), param->size);
3905 if (ret)
3906 break;
3907
3908 /* Sanity check initial range */
Sudeep Yedalapure8ff97992017-01-20 20:12:51 +05303909 if (obj.size == 0 || obj.virtoffset + obj.size < obj.size ||
Shrenuj Bansala419c792016-10-20 14:05:11 -07003910 obj.virtoffset + obj.size > virt_entry->memdesc.size ||
3911 !(IS_ALIGNED(obj.virtoffset | obj.size, pg_sz))) {
3912 ret = -EINVAL;
3913 break;
3914 }
3915
3916 if (obj.flags & KGSL_SPARSE_BIND)
3917 ret = sparse_bind_range(private, &obj, virt_entry);
3918 else if (obj.flags & KGSL_SPARSE_UNBIND)
3919 ret = sparse_unbind_range(&obj, virt_entry);
3920 else
3921 ret = -EINVAL;
3922 if (ret)
3923 break;
3924
3925 ptr += sizeof(obj);
3926 }
3927
3928 kgsl_mem_entry_put(virt_entry);
3929
3930 return ret;
3931}
3932
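/*
 * Illustrative userspace sketch (not part of the driver): one plausible way
 * to back part of a sparse virtual range with a sparse physical allocation
 * through the ioctl above. The IOCTL_KGSL_SPARSE_BIND name and the uapi
 * struct layouts are assumptions taken from the matching msm_kgsl.h header;
 * virt_id and phys_id are assumed to come from earlier SPARSE_VIRT_ALLOC and
 * SPARSE_PHYS_ALLOC calls, and error handling is omitted.
 *
 *	struct kgsl_sparse_binding_object bind = {
 *		.virtoffset = 0,		// offset into the virtual range
 *		.physoffset = 0,		// offset into the physical pool
 *		.size = 16 * 64 * 1024,		// 16 chunks of the 64 KB pagesize
 *		.flags = KGSL_SPARSE_BIND,
 *		.id = phys_id,			// physical allocation to bind
 *	};
 *	struct kgsl_sparse_bind param = {
 *		.list = (uintptr_t)&bind,	// array of binding objects
 *		.id = virt_id,			// virtual range to operate on
 *		.size = sizeof(bind),		// size of one array element
 *		.count = 1,			// number of elements in the array
 *	};
 *
 *	ioctl(fd, IOCTL_KGSL_SPARSE_BIND, &param);
 */
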
Tarun Karra2b8b3632016-11-14 16:38:27 -08003933long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
3934 unsigned int cmd, void *data)
3935{
3936 struct kgsl_gpu_sparse_command *param = data;
3937 struct kgsl_device *device = dev_priv->device;
3938 struct kgsl_context *context;
3939 struct kgsl_drawobj *drawobj[2];
3940 struct kgsl_drawobj_sparse *sparseobj;
3941 long result;
3942 unsigned int i = 0;
3943
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06003944 if (!(device->flags & KGSL_FLAG_SPARSE))
3945 return -ENOTSUPP;
3946
Tarun Karra2b8b3632016-11-14 16:38:27 -08003947	/* Make sure the sparse and syncpoint counts aren't too big */
3948 if (param->numsparse > KGSL_MAX_SPARSE ||
3949 param->numsyncs > KGSL_MAX_SYNCPOINTS)
3950 return -EINVAL;
3951
3952	/* Make sure there is at least one sparse or sync */
3953 if (param->numsparse == 0 && param->numsyncs == 0)
3954 return -EINVAL;
3955
3956 /* Only Sparse commands are supported in this ioctl */
3957 if (!(param->flags & KGSL_DRAWOBJ_SPARSE) || (param->flags &
3958 (KGSL_DRAWOBJ_SUBMIT_IB_LIST | KGSL_DRAWOBJ_MARKER
3959 | KGSL_DRAWOBJ_SYNC)))
3960 return -EINVAL;
3961
3962 context = kgsl_context_get_owner(dev_priv, param->context_id);
3963 if (context == NULL)
3964 return -EINVAL;
3965
3966 /* Restrict bind commands to bind context */
3967 if (!(context->flags & KGSL_CONTEXT_SPARSE)) {
3968 kgsl_context_put(context);
3969 return -EINVAL;
3970 }
3971
3972 if (param->numsyncs) {
3973 struct kgsl_drawobj_sync *syncobj = kgsl_drawobj_sync_create(
3974 device, context);
3975 if (IS_ERR(syncobj)) {
3976 result = PTR_ERR(syncobj);
3977 goto done;
3978 }
3979
3980 drawobj[i++] = DRAWOBJ(syncobj);
3981 result = kgsl_drawobj_sync_add_synclist(device, syncobj,
3982 to_user_ptr(param->synclist),
3983 param->syncsize, param->numsyncs);
3984 if (result)
3985 goto done;
3986 }
3987
3988 if (param->numsparse) {
3989 sparseobj = kgsl_drawobj_sparse_create(device, context,
3990 param->flags);
3991 if (IS_ERR(sparseobj)) {
3992 result = PTR_ERR(sparseobj);
3993 goto done;
3994 }
3995
3996 sparseobj->id = param->id;
3997 drawobj[i++] = DRAWOBJ(sparseobj);
3998 result = kgsl_drawobj_sparse_add_sparselist(device, sparseobj,
3999 param->id, to_user_ptr(param->sparselist),
4000 param->sparsesize, param->numsparse);
4001 if (result)
4002 goto done;
4003 }
4004
4005 result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
4006 drawobj, i, &param->timestamp);
4007
4008done:
4009 /*
4010 * -EPROTO is a "success" error - it just tells the user that the
4011 * context had previously faulted
4012 */
4013 if (result && result != -EPROTO)
4014 while (i--)
4015 kgsl_drawobj_destroy(drawobj[i]);
4016
4017 kgsl_context_put(context);
4018 return result;
4019}
4020
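/*
 * Illustrative userspace sketch (not part of the driver): one plausible way
 * to queue bind/unbind work on the GPU timeline with the ioctl above. The
 * IOCTL_KGSL_GPU_SPARSE_COMMAND name, the uapi layout and the element type
 * behind 'binds' are assumptions taken from the matching msm_kgsl.h header;
 * the context is assumed to have been created with KGSL_CONTEXT_SPARSE, as
 * enforced above, and wait_for_timestamp() is a hypothetical helper standing
 * in for the usual timestamp-wait ioctl.
 *
 *	struct kgsl_gpu_sparse_command cmd = {
 *		.flags = KGSL_DRAWOBJ_SPARSE,	// sparse-only submission
 *		.context_id = ctxt_id,		// bind context
 *		.id = virt_id,			// sparse virtual allocation
 *		.sparselist = (uintptr_t)binds,	// array of sparse objects
 *		.sparsesize = sizeof(binds[0]),	// size of one element
 *		.numsparse = nbinds,		// number of elements
 *		.numsyncs = 0,			// no sync points in this sketch
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_GPU_SPARSE_COMMAND, &cmd) == 0)
 *		// cmd.timestamp is filled in by queue_cmds()
 *		wait_for_timestamp(fd, ctxt_id, cmd.timestamp);
 */
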
4021void kgsl_sparse_bind(struct kgsl_process_private *private,
4022 struct kgsl_drawobj_sparse *sparseobj)
4023{
4024 struct kgsl_sparseobj_node *sparse_node;
4025 struct kgsl_mem_entry *virt_entry = NULL;
4026 long ret = 0;
4027 char *name;
4028
4029 virt_entry = kgsl_sharedmem_find_id_flags(private, sparseobj->id,
4030 KGSL_MEMFLAGS_SPARSE_VIRT);
4031 if (virt_entry == NULL)
4032 return;
4033
4034 list_for_each_entry(sparse_node, &sparseobj->sparselist, node) {
4035 if (sparse_node->obj.flags & KGSL_SPARSE_BIND) {
4036 ret = sparse_bind_range(private, &sparse_node->obj,
4037 virt_entry);
4038 name = "bind";
4039 } else {
4040 ret = sparse_unbind_range(&sparse_node->obj,
4041 virt_entry);
4042 name = "unbind";
4043 }
4044
4045 if (ret)
4046 KGSL_CORE_ERR("kgsl: Unable to '%s' ret %ld virt_id %d, phys_id %d, virt_offset %16.16llX, phys_offset %16.16llX, size %16.16llX, flags %16.16llX\n",
4047 name, ret, sparse_node->virt_id,
4048 sparse_node->obj.id,
4049 sparse_node->obj.virtoffset,
4050 sparse_node->obj.physoffset,
4051 sparse_node->obj.size, sparse_node->obj.flags);
4052 }
4053
4054 kgsl_mem_entry_put(virt_entry);
4055}
4056EXPORT_SYMBOL(kgsl_sparse_bind);
4057
Shrenuj Bansala419c792016-10-20 14:05:11 -07004058long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
4059 unsigned int cmd, void *data)
4060{
4061 struct kgsl_process_private *private = dev_priv->process_priv;
4062 struct kgsl_gpuobj_info *param = data;
4063 struct kgsl_mem_entry *entry;
4064
4065 if (param->id == 0)
4066 return -EINVAL;
4067
4068 entry = kgsl_sharedmem_find_id(private, param->id);
4069 if (entry == NULL)
4070 return -EINVAL;
4071
4072 param->id = entry->id;
4073 param->gpuaddr = entry->memdesc.gpuaddr;
4074 param->flags = entry->memdesc.flags;
4075 param->size = entry->memdesc.size;
4076 param->va_len = kgsl_memdesc_footprint(&entry->memdesc);
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304077 /*
4078	 * Entries can have multiple user mappings so there isn't any one address
4079 * we can report. Plus, the user should already know their mappings, so
4080 * there isn't any value in reporting it back to them.
4081 */
4082 param->va_addr = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07004083
4084 kgsl_mem_entry_put(entry);
4085 return 0;
4086}
4087
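/*
 * Illustrative userspace sketch (not part of the driver): querying an
 * allocation by id with the ioctl above. The IOCTL_KGSL_GPUOBJ_INFO name is
 * an assumption taken from the matching msm_kgsl.h header; the fields shown
 * mirror what the code above fills in (va_addr is always reported as 0).
 *
 *	struct kgsl_gpuobj_info info = { .id = id };
 *
 *	if (ioctl(fd, IOCTL_KGSL_GPUOBJ_INFO, &info) == 0)
 *		printf("gpuaddr 0x%llx size %llu footprint %llu\n",
 *			info.gpuaddr, info.size, info.va_len);
 */
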
4088long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
4089 unsigned int cmd, void *data)
4090{
4091 struct kgsl_process_private *private = dev_priv->process_priv;
4092 struct kgsl_gpuobj_set_info *param = data;
4093 struct kgsl_mem_entry *entry;
Deepak Kumar7d13ed22018-02-23 16:31:46 +05304094 int ret = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07004095
4096 if (param->id == 0)
4097 return -EINVAL;
4098
4099 entry = kgsl_sharedmem_find_id(private, param->id);
4100 if (entry == NULL)
4101 return -EINVAL;
4102
4103 if (param->flags & KGSL_GPUOBJ_SET_INFO_METADATA)
4104 copy_metadata(entry, param->metadata, param->metadata_len);
4105
4106 if (param->flags & KGSL_GPUOBJ_SET_INFO_TYPE) {
Deepak Kumar7d13ed22018-02-23 16:31:46 +05304107 if (param->type <= (KGSL_MEMTYPE_MASK >> KGSL_MEMTYPE_SHIFT)) {
4108 entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
4109 entry->memdesc.flags |= (uint64_t)((param->type <<
4110 KGSL_MEMTYPE_SHIFT) & KGSL_MEMTYPE_MASK);
4111 } else
4112 ret = -EINVAL;
Shrenuj Bansala419c792016-10-20 14:05:11 -07004113 }
4114
4115 kgsl_mem_entry_put(entry);
Deepak Kumar7d13ed22018-02-23 16:31:46 +05304116 return ret;
Shrenuj Bansala419c792016-10-20 14:05:11 -07004117}
4118
4119/**
4120 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
4121 * @dev_priv: pointer to the private device structure
4122 * @cmd: the ioctl cmd passed from kgsl_ioctl
4123 * @data: the user data buffer from kgsl_ioctl
4124 * Return: 0 on success or error code on failure
4125 */
4126
4127long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
4128 unsigned int cmd, void *data)
4129{
4130 struct kgsl_timestamp_event *param = data;
4131 int ret;
4132
4133 switch (param->type) {
4134 case KGSL_TIMESTAMP_EVENT_FENCE:
4135 ret = kgsl_add_fence_event(dev_priv->device,
4136 param->context_id, param->timestamp, param->priv,
4137 param->len, dev_priv);
4138 break;
4139 default:
4140 ret = -EINVAL;
4141 }
4142
4143 return ret;
4144}
4145
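/*
 * Illustrative userspace sketch (not part of the driver): requesting a sync
 * fence tied to a context timestamp through the ioctl above. The
 * IOCTL_KGSL_TIMESTAMP_EVENT name and the kgsl_timestamp_event_fence payload
 * (which the driver is assumed to fill with the new fence fd) are taken from
 * the matching msm_kgsl.h header; only KGSL_TIMESTAMP_EVENT_FENCE is handled
 * by the switch above.
 *
 *	struct kgsl_timestamp_event_fence fence = { 0 };
 *	struct kgsl_timestamp_event event = {
 *		.type = KGSL_TIMESTAMP_EVENT_FENCE,
 *		.context_id = ctxt_id,
 *		.timestamp = ts,		// timestamp the fence signals on
 *		.priv = &fence,			// filled with the fence fd
 *		.len = sizeof(fence),
 *	};
 *
 *	ioctl(fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
 */
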
4146static int
4147kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
4148{
4149 struct kgsl_memdesc *memdesc = &device->memstore;
4150 int result;
4151 unsigned int vma_size = vma->vm_end - vma->vm_start;
4152
4153 /* The memstore can only be mapped as read only */
4154
4155 if (vma->vm_flags & VM_WRITE)
4156 return -EPERM;
4157
Indira Biruduraju1e6b63e2020-08-11 15:24:16 +05304158 vma->vm_flags &= ~VM_MAYWRITE;
4159
Shrenuj Bansala419c792016-10-20 14:05:11 -07004160 if (memdesc->size != vma_size) {
4161 KGSL_MEM_ERR(device, "memstore bad size: %d should be %llu\n",
4162 vma_size, memdesc->size);
4163 return -EINVAL;
4164 }
4165
4166 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
4167
4168 result = remap_pfn_range(vma, vma->vm_start,
4169 device->memstore.physaddr >> PAGE_SHIFT,
4170 vma_size, vma->vm_page_prot);
4171 if (result != 0)
4172 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
4173 result);
4174
4175 return result;
4176}
4177
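/*
 * Illustrative userspace sketch (not part of the driver): the memstore can
 * only be mapped read-only, with the mmap offset equal to its GPU address
 * and the length equal to its size. memstore_gpuaddr and memstore_size are
 * assumed to have been obtained from the device beforehand (that query lives
 * outside this file); PROT_WRITE or a size mismatch is rejected above.
 *
 *	void *memstore = mmap(NULL, memstore_size, PROT_READ, MAP_SHARED,
 *				fd, (off_t)memstore_gpuaddr);
 */
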
4178/*
4179 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
4180 * Increase the refcount to make sure that the accounting stays correct
4181 */
4182
4183static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
4184{
4185 struct kgsl_mem_entry *entry = vma->vm_private_data;
4186
4187 if (kgsl_mem_entry_get(entry) == 0)
4188 vma->vm_private_data = NULL;
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304189
4190 atomic_inc(&entry->map_count);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004191}
4192
4193static int
4194kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4195{
4196 struct kgsl_mem_entry *entry = vma->vm_private_data;
4197
4198 if (!entry)
4199 return VM_FAULT_SIGBUS;
4200 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
4201 return VM_FAULT_SIGBUS;
4202
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304203 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004204}
4205
4206static void
4207kgsl_gpumem_vm_close(struct vm_area_struct *vma)
4208{
4209 struct kgsl_mem_entry *entry = vma->vm_private_data;
4210
4211 if (!entry)
4212 return;
4213
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304214 /*
4215 * Remove the memdesc from the mapped stat once all the mappings have
4216 * gone away
4217 */
4218 if (!atomic_dec_return(&entry->map_count))
4219 entry->priv->gpumem_mapped -= entry->memdesc.size;
4220
Shrenuj Bansala419c792016-10-20 14:05:11 -07004221 kgsl_mem_entry_put(entry);
4222}
4223
4224static const struct vm_operations_struct kgsl_gpumem_vm_ops = {
4225 .open = kgsl_gpumem_vm_open,
4226 .fault = kgsl_gpumem_vm_fault,
4227 .close = kgsl_gpumem_vm_close,
4228};
4229
4230static int
4231get_mmap_entry(struct kgsl_process_private *private,
4232 struct kgsl_mem_entry **out_entry, unsigned long pgoff,
4233 unsigned long len)
4234{
4235 int ret = 0;
4236 struct kgsl_mem_entry *entry;
4237
4238 entry = kgsl_sharedmem_find_id(private, pgoff);
4239 if (entry == NULL)
4240 entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
4241
4242 if (!entry)
4243 return -EINVAL;
4244
4245 if (!entry->memdesc.ops ||
4246 !entry->memdesc.ops->vmflags ||
4247 !entry->memdesc.ops->vmfault) {
4248 ret = -EINVAL;
4249 goto err_put;
4250 }
4251
4252 if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
4253 if (len != entry->memdesc.size) {
4254 ret = -EINVAL;
4255 goto err_put;
4256 }
4257 }
4258
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304259 /* Don't allow ourselves to remap user memory */
4260 if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_ADDR) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07004261 ret = -EBUSY;
4262 goto err_put;
4263 }
4264
4265 if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
4266 if (len != kgsl_memdesc_footprint(&entry->memdesc)) {
4267 ret = -ERANGE;
4268 goto err_put;
4269 }
4270 } else if (len != kgsl_memdesc_footprint(&entry->memdesc) &&
4271 len != entry->memdesc.size) {
4272 /*
4273		 * If the entry does not use the CPU map, the user can map
4274		 * either the footprint or the entry size
4275 */
4276 ret = -ERANGE;
4277 goto err_put;
4278 }
4279
4280 *out_entry = entry;
4281 return 0;
4282err_put:
4283 kgsl_mem_entry_put(entry);
4284 return ret;
4285}
4286
4287static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
4288 struct kgsl_mem_entry *entry, unsigned long addr,
4289 unsigned long size)
4290{
4291 int ret;
4292
4293 ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
4294 (uint64_t) size);
4295
4296 if (ret != 0)
4297 return ret;
4298
4299 entry->memdesc.gpuaddr = (uint64_t) addr;
4300 entry->memdesc.pagetable = private->pagetable;
4301
4302 ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
4303 if (ret) {
4304 kgsl_mmu_put_gpuaddr(&entry->memdesc);
4305 return ret;
4306 }
4307
4308 kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
4309 entry->memdesc.size);
4310
4311 return addr;
4312}
4313
4314static unsigned long _gpu_find_svm(struct kgsl_process_private *private,
4315 unsigned long start, unsigned long end, unsigned long len,
4316 unsigned int align)
4317{
4318 uint64_t addr = kgsl_mmu_find_svm_region(private->pagetable,
4319 (uint64_t) start, (uint64_t)end, (uint64_t) len, align);
4320
4321 BUG_ON(!IS_ERR_VALUE((unsigned long)addr) && (addr > ULONG_MAX));
4322
4323 return (unsigned long) addr;
4324}
4325
4326/* Search top down in the CPU VM region for a free address */
4327static unsigned long _cpu_get_unmapped_area(unsigned long bottom,
4328 unsigned long top, unsigned long len, unsigned long align)
4329{
4330 struct vm_unmapped_area_info info;
4331 unsigned long addr, err;
4332
4333 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
4334 info.low_limit = bottom;
4335 info.high_limit = top;
4336 info.length = len;
4337 info.align_offset = 0;
4338 info.align_mask = align - 1;
4339
4340 addr = vm_unmapped_area(&info);
4341
4342 if (IS_ERR_VALUE(addr))
4343 return addr;
4344
4345 err = security_mmap_addr(addr);
4346 return err ? err : addr;
4347}
4348
4349static unsigned long _search_range(struct kgsl_process_private *private,
4350 struct kgsl_mem_entry *entry,
4351 unsigned long start, unsigned long end,
4352 unsigned long len, uint64_t align)
4353{
4354 unsigned long cpu, gpu = end, result = -ENOMEM;
4355
4356 while (gpu > start) {
4357 /* find a new empty spot on the CPU below the last one */
4358 cpu = _cpu_get_unmapped_area(start, gpu, len,
4359 (unsigned long) align);
4360 if (IS_ERR_VALUE(cpu)) {
4361 result = cpu;
4362 break;
4363 }
4364 /* try to map it on the GPU */
4365 result = _gpu_set_svm_region(private, entry, cpu, len);
4366 if (!IS_ERR_VALUE(result))
4367 break;
4368
4369 trace_kgsl_mem_unmapped_area_collision(entry, cpu, len);
4370
4371 if (cpu <= start) {
4372 result = -ENOMEM;
4373 break;
4374 }
4375
4376 /* move downward to the next empty spot on the GPU */
4377 gpu = _gpu_find_svm(private, start, cpu, len, align);
4378 if (IS_ERR_VALUE(gpu)) {
4379 result = gpu;
4380 break;
4381 }
4382
4383		/* Check that _gpu_find_svm doesn't put us in a loop */
4384 if (gpu >= cpu) {
4385 result = -ENOMEM;
4386 break;
4387 }
4388
4389 /* Break if the recommended GPU address is out of range */
4390 if (gpu < start) {
4391 result = -ENOMEM;
4392 break;
4393 }
4394
4395 /*
4396 * Add the length of the chunk to the GPU address to yield the
4397 * upper bound for the CPU search
4398 */
4399 gpu += len;
4400 }
4401 return result;
4402}
4403
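/*
 * Illustrative walk-through of the search above: the CPU proposes the
 * highest free gap below the current upper bound that fits 'len'. If the
 * GPU cannot take that exact address (for instance another GPU mapping
 * already owns it), _gpu_find_svm() reports the next free GPU region below
 * the failed address and the CPU search is retried with the top of that
 * region as its new upper bound. The two sides walk downward together until
 * an address satisfies both, or the range is exhausted and -ENOMEM is
 * returned.
 */
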
4404static unsigned long _get_svm_area(struct kgsl_process_private *private,
4405 struct kgsl_mem_entry *entry, unsigned long hint,
4406 unsigned long len, unsigned long flags)
4407{
4408 uint64_t start, end;
4409 int align_shift = kgsl_memdesc_get_align(&entry->memdesc);
4410 uint64_t align;
4411 unsigned long result;
4412 unsigned long addr;
4413
4414 if (align_shift >= ilog2(SZ_2M))
4415 align = SZ_2M;
4416 else if (align_shift >= ilog2(SZ_1M))
4417 align = SZ_1M;
4418 else if (align_shift >= ilog2(SZ_64K))
4419 align = SZ_64K;
4420 else
4421 align = SZ_4K;
4422
4423 /* get the GPU pagetable's SVM range */
4424 if (kgsl_mmu_svm_range(private->pagetable, &start, &end,
4425 entry->memdesc.flags))
4426 return -ERANGE;
4427
4428 /* now clamp the range based on the CPU's requirements */
4429 start = max_t(uint64_t, start, mmap_min_addr);
4430 end = min_t(uint64_t, end, current->mm->mmap_base);
4431 if (start >= end)
4432 return -ERANGE;
4433
4434 if (flags & MAP_FIXED) {
4435 /* we must use addr 'hint' or fail */
4436 return _gpu_set_svm_region(private, entry, hint, len);
4437 } else if (hint != 0) {
4438 struct vm_area_struct *vma;
4439
4440 /*
4441		 * See if the hint is usable; if not, we will use
4442		 * it as the start point for searching.
4443 */
4444 addr = clamp_t(unsigned long, hint & ~(align - 1),
4445 start, (end - len) & ~(align - 1));
4446
4447 vma = find_vma(current->mm, addr);
4448
4449 if (vma == NULL || ((addr + len) <= vma->vm_start)) {
4450 result = _gpu_set_svm_region(private, entry, addr, len);
4451
4452 /* On failure drop down to keep searching */
4453 if (!IS_ERR_VALUE(result))
4454 return result;
4455 }
4456 } else {
4457 /* no hint, start search at the top and work down */
4458 addr = end & ~(align - 1);
4459 }
4460
4461 /*
4462 * Search downwards from the hint first. If that fails we
4463 * must try to search above it.
4464 */
4465 result = _search_range(private, entry, start, addr, len, align);
4466 if (IS_ERR_VALUE(result) && hint != 0)
4467 result = _search_range(private, entry, addr, end, len, align);
4468
4469 return result;
4470}
4471
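/*
 * Worked example of the alignment handling above (illustrative only): an
 * entry whose memdesc alignment is ilog2(SZ_1M) gets align = SZ_1M, so a
 * hint of 0x40123000 is rounded down to 0x40100000 and then clamped into
 * [start, end - len]. If that address is free on both the CPU and the GPU
 * it is used directly; otherwise it only seeds the downward (and, failing
 * that, upward) _search_range() passes.
 */
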
4472static unsigned long
4473kgsl_get_unmapped_area(struct file *file, unsigned long addr,
4474 unsigned long len, unsigned long pgoff,
4475 unsigned long flags)
4476{
4477 unsigned long val;
4478 unsigned long vma_offset = pgoff << PAGE_SHIFT;
4479 struct kgsl_device_private *dev_priv = file->private_data;
4480 struct kgsl_process_private *private = dev_priv->process_priv;
4481 struct kgsl_device *device = dev_priv->device;
4482 struct kgsl_mem_entry *entry = NULL;
4483
4484 if (vma_offset == (unsigned long) device->memstore.gpuaddr)
4485 return get_unmapped_area(NULL, addr, len, pgoff, flags);
4486
4487 val = get_mmap_entry(private, &entry, pgoff, len);
4488 if (val)
4489 return val;
4490
4491 /* Do not allow CPU mappings for secure buffers */
4492 if (kgsl_memdesc_is_secured(&entry->memdesc)) {
4493 val = -EPERM;
4494 goto put;
4495 }
4496
4497 if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
4498 val = get_unmapped_area(NULL, addr, len, 0, flags);
4499 if (IS_ERR_VALUE(val))
Venkateswara Rao Tadikondad57f7e52017-08-29 11:02:38 +05304500 KGSL_DRV_ERR_RATELIMIT(device,
Shrenuj Bansala419c792016-10-20 14:05:11 -07004501 "get_unmapped_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
Archana Sriramd66ae7b2020-10-18 23:34:04 +05304502 pid_nr(private->pid), addr,
4503 pgoff, len, (int) val);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004504 } else {
4505 val = _get_svm_area(private, entry, addr, len, flags);
4506 if (IS_ERR_VALUE(val))
Venkateswara Rao Tadikondad57f7e52017-08-29 11:02:38 +05304507 KGSL_DRV_ERR_RATELIMIT(device,
Hareesh Gunduca522a12017-02-15 16:02:06 +05304508 "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n",
Archana Sriramd66ae7b2020-10-18 23:34:04 +05304509 pid_nr(private->pid),
4510 current->mm->mmap_base, addr,
Hareesh Gunduca522a12017-02-15 16:02:06 +05304511 pgoff, len, (int) val);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004512 }
4513
4514put:
4515 kgsl_mem_entry_put(entry);
4516 return val;
4517}
4518
4519static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
4520{
4521 unsigned int ret, cache;
4522 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
4523 struct kgsl_device_private *dev_priv = file->private_data;
4524 struct kgsl_process_private *private = dev_priv->process_priv;
4525 struct kgsl_mem_entry *entry = NULL;
4526 struct kgsl_device *device = dev_priv->device;
4527
4528	/* Handle legacy behavior for memstore */
4529
4530 if (vma_offset == (unsigned long) device->memstore.gpuaddr)
4531 return kgsl_mmap_memstore(device, vma);
4532
4533 /*
4534 * The reference count on the entry that we get from
4535 * get_mmap_entry() will be held until kgsl_gpumem_vm_close().
4536 */
4537 ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
4538 vma->vm_end - vma->vm_start);
4539 if (ret)
4540 return ret;
4541
4542 vma->vm_flags |= entry->memdesc.ops->vmflags;
4543
4544 vma->vm_private_data = entry;
4545
4546 /* Determine user-side caching policy */
4547
4548 cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
4549
4550 switch (cache) {
4551 case KGSL_CACHEMODE_UNCACHED:
4552 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
4553 break;
4554 case KGSL_CACHEMODE_WRITETHROUGH:
4555 vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
4556 if (pgprot_val(vma->vm_page_prot) ==
4557 pgprot_val(pgprot_writebackcache(vma->vm_page_prot)))
4558 WARN_ONCE(1, "WRITETHROUGH is deprecated for arm64");
4559 break;
4560 case KGSL_CACHEMODE_WRITEBACK:
4561 vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
4562 break;
4563 case KGSL_CACHEMODE_WRITECOMBINE:
4564 default:
4565 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
4566 break;
4567 }
4568
4569 vma->vm_ops = &kgsl_gpumem_vm_ops;
4570
4571 if (cache == KGSL_CACHEMODE_WRITEBACK
4572 || cache == KGSL_CACHEMODE_WRITETHROUGH) {
4573 int i;
4574 unsigned long addr = vma->vm_start;
4575 struct kgsl_memdesc *m = &entry->memdesc;
4576
4577 for (i = 0; i < m->page_count; i++) {
4578 struct page *page = m->pages[i];
4579
4580 vm_insert_page(vma, addr, page);
4581 addr += PAGE_SIZE;
4582 }
4583 }
4584
4585 vma->vm_file = file;
4586
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304587 if (atomic_inc_return(&entry->map_count) == 1)
4588 entry->priv->gpumem_mapped += entry->memdesc.size;
Shrenuj Bansala419c792016-10-20 14:05:11 -07004589
Jordan Crouse6bce65c2020-12-28 16:06:42 +05304590 trace_kgsl_mem_mmap(entry, vma->vm_start);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004591 return 0;
4592}
4593
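/*
 * Illustrative userspace sketch (not part of the driver): mapping an
 * allocation into the process. get_mmap_entry() accepts either the object
 * id as the page offset or the GPU address as the byte offset, so one
 * plausible call for an id-based allocation is shown below; page_size is
 * the system page size (e.g. from sysconf(_SC_PAGESIZE)) and 'size' is
 * assumed to satisfy the length checks above.
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *				fd, (off_t)id * page_size);
 */
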
4594static irqreturn_t kgsl_irq_handler(int irq, void *data)
4595{
4596 struct kgsl_device *device = data;
4597
4598 return device->ftbl->irq_handler(device);
4599
4600}
4601
4602#define KGSL_READ_MESSAGE "OH HAI GPU\n"
4603
4604static ssize_t kgsl_read(struct file *filep, char __user *buf, size_t count,
4605 loff_t *pos)
4606{
4607 return simple_read_from_buffer(buf, count, pos,
4608 KGSL_READ_MESSAGE, strlen(KGSL_READ_MESSAGE) + 1);
4609}
4610
4611static const struct file_operations kgsl_fops = {
4612 .owner = THIS_MODULE,
4613 .release = kgsl_release,
4614 .open = kgsl_open,
4615 .mmap = kgsl_mmap,
4616 .read = kgsl_read,
4617 .get_unmapped_area = kgsl_get_unmapped_area,
4618 .unlocked_ioctl = kgsl_ioctl,
4619 .compat_ioctl = kgsl_compat_ioctl,
4620};
4621
4622struct kgsl_driver kgsl_driver = {
4623 .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
4624 .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
4625 .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
4626 /*
4627 * Full cache flushes are faster than line by line on at least
4628 * 8064 and 8974 once the region to be flushed is > 16mb.
4629 */
4630 .full_cache_threshold = SZ_16M,
4631
4632 .stats.vmalloc = ATOMIC_LONG_INIT(0),
4633 .stats.vmalloc_max = ATOMIC_LONG_INIT(0),
4634 .stats.page_alloc = ATOMIC_LONG_INIT(0),
4635 .stats.page_alloc_max = ATOMIC_LONG_INIT(0),
4636 .stats.coherent = ATOMIC_LONG_INIT(0),
4637 .stats.coherent_max = ATOMIC_LONG_INIT(0),
4638 .stats.secure = ATOMIC_LONG_INIT(0),
4639 .stats.secure_max = ATOMIC_LONG_INIT(0),
4640 .stats.mapped = ATOMIC_LONG_INIT(0),
4641 .stats.mapped_max = ATOMIC_LONG_INIT(0),
4642};
4643EXPORT_SYMBOL(kgsl_driver);
4644
4645static void _unregister_device(struct kgsl_device *device)
4646{
4647 int minor;
4648
4649 mutex_lock(&kgsl_driver.devlock);
4650 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
4651 if (device == kgsl_driver.devp[minor])
4652 break;
4653 }
4654 if (minor != KGSL_DEVICE_MAX) {
4655 device_destroy(kgsl_driver.class,
4656 MKDEV(MAJOR(kgsl_driver.major), minor));
4657 kgsl_driver.devp[minor] = NULL;
4658 }
4659 mutex_unlock(&kgsl_driver.devlock);
4660}
4661
4662static int _register_device(struct kgsl_device *device)
4663{
4664 int minor, ret;
4665 dev_t dev;
4666
4667 /* Find a minor for the device */
4668
4669 mutex_lock(&kgsl_driver.devlock);
4670 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
4671 if (kgsl_driver.devp[minor] == NULL) {
4672 kgsl_driver.devp[minor] = device;
4673 break;
4674 }
4675 }
4676 mutex_unlock(&kgsl_driver.devlock);
4677
4678 if (minor == KGSL_DEVICE_MAX) {
4679 KGSL_CORE_ERR("minor devices exhausted\n");
4680 return -ENODEV;
4681 }
4682
4683 /* Create the device */
4684 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
4685 device->dev = device_create(kgsl_driver.class,
4686 &device->pdev->dev,
4687 dev, device,
4688 device->name);
4689
4690 if (IS_ERR(device->dev)) {
4691 mutex_lock(&kgsl_driver.devlock);
4692 kgsl_driver.devp[minor] = NULL;
4693 mutex_unlock(&kgsl_driver.devlock);
4694 ret = PTR_ERR(device->dev);
4695 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
4696 return ret;
4697 }
4698
4699 dev_set_drvdata(&device->pdev->dev, device);
4700 return 0;
4701}
4702
4703int kgsl_device_platform_probe(struct kgsl_device *device)
4704{
4705 int status = -EINVAL;
4706 struct resource *res;
4707 int cpu;
4708
4709 status = _register_device(device);
4710 if (status)
4711 return status;
4712
4713 /* Initialize logging first, so that failures below actually print. */
4714 kgsl_device_debugfs_init(device);
4715
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -06004716	/* Disable the sparse ioctls as they are not used */
4717 device->flags &= ~KGSL_FLAG_SPARSE;
4718
Shrenuj Bansala419c792016-10-20 14:05:11 -07004719 status = kgsl_pwrctrl_init(device);
4720 if (status)
4721 goto error;
4722
Shrenuj Bansala419c792016-10-20 14:05:11 -07004723 /*
4724 * Check if a shadermemname is defined, and then get shader memory
4725 * details including shader memory starting physical address
4726 * and shader memory length
4727 */
4728 if (device->shadermemname != NULL) {
4729 res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
4730 device->shadermemname);
4731
4732 if (res == NULL) {
4733 KGSL_DRV_WARN(device,
4734 "Shader memory: platform_get_resource_byname failed\n");
4735 }
4736
4737 else {
4738 device->shader_mem_phys = res->start;
4739 device->shader_mem_len = resource_size(res);
4740 }
4741
4742 if (!devm_request_mem_region(device->dev,
4743 device->shader_mem_phys,
4744 device->shader_mem_len,
4745 device->name)) {
4746 KGSL_DRV_WARN(device, "request_mem_region_failed\n");
4747 }
4748 }
4749
4750 if (!devm_request_mem_region(device->dev, device->reg_phys,
4751 device->reg_len, device->name)) {
4752 KGSL_DRV_ERR(device, "request_mem_region failed\n");
4753 status = -ENODEV;
4754 goto error_pwrctrl_close;
4755 }
4756
4757 device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
4758 device->reg_len);
4759
4760 if (device->reg_virt == NULL) {
4761 KGSL_DRV_ERR(device, "ioremap failed\n");
4762 status = -ENODEV;
4763 goto error_pwrctrl_close;
4764 }
4765	/* Acquire the interrupt */
4766 device->pwrctrl.interrupt_num =
4767 platform_get_irq_byname(device->pdev, device->pwrctrl.irq_name);
4768
4769 if (device->pwrctrl.interrupt_num <= 0) {
4770 KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
4771 device->pwrctrl.interrupt_num);
4772 status = -EINVAL;
4773 goto error_pwrctrl_close;
4774 }
4775
4776 status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
4777 kgsl_irq_handler, IRQF_TRIGGER_HIGH,
4778 device->name, device);
4779 if (status) {
4780 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
4781 device->pwrctrl.interrupt_num, status);
4782 goto error_pwrctrl_close;
4783 }
4784 disable_irq(device->pwrctrl.interrupt_num);
4785
4786 KGSL_DRV_INFO(device,
4787 "dev_id %d regs phys 0x%08lx size 0x%08x\n",
4788 device->id, device->reg_phys, device->reg_len);
4789
4790 rwlock_init(&device->context_lock);
Hareesh Gundu2eb74d72017-06-07 14:50:15 +05304791 spin_lock_init(&device->submit_lock);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004792
4793 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
4794
4795 status = kgsl_mmu_probe(device, kgsl_mmu_type);
4796 if (status != 0)
4797 goto error_pwrctrl_close;
4798
4799 /* Check to see if our device can perform DMA correctly */
4800 status = dma_set_coherent_mask(&device->pdev->dev, KGSL_DMA_BIT_MASK);
4801 if (status)
4802 goto error_close_mmu;
4803
4804 /* Initialize the memory pools */
4805 kgsl_init_page_pools(device->pdev);
4806
4807 status = kgsl_allocate_global(device, &device->memstore,
4808 KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
4809
4810 if (status != 0)
4811 goto error_close_mmu;
4812
Shrenuj Bansala419c792016-10-20 14:05:11 -07004813 /*
4814 * The default request type PM_QOS_REQ_ALL_CORES is
4815 * applicable to all CPU cores that are online and
4816	 * would have a power impact when there are a larger
4817	 * number of CPUs. The PM_QOS_REQ_AFFINE_IRQ request
4818	 * type applies the vote only to the CPU to which the
4819	 * IRQ's affinity is set.
4820 */
4821#ifdef CONFIG_SMP
4822
4823 device->pwrctrl.pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
4824 device->pwrctrl.pm_qos_req_dma.irq = device->pwrctrl.interrupt_num;
4825
4826#endif
4827 pm_qos_add_request(&device->pwrctrl.pm_qos_req_dma,
4828 PM_QOS_CPU_DMA_LATENCY,
4829 PM_QOS_DEFAULT_VALUE);
4830
4831 if (device->pwrctrl.l2pc_cpus_mask) {
4832
4833 device->pwrctrl.l2pc_cpus_qos.type =
4834 PM_QOS_REQ_AFFINE_CORES;
4835		cpumask_clear(&device->pwrctrl.l2pc_cpus_qos.cpus_affine);
4836 for_each_possible_cpu(cpu) {
4837 if ((1 << cpu) & device->pwrctrl.l2pc_cpus_mask)
4838 cpumask_set_cpu(cpu, &device->pwrctrl.
4839 l2pc_cpus_qos.cpus_affine);
4840 }
4841
4842 pm_qos_add_request(&device->pwrctrl.l2pc_cpus_qos,
4843 PM_QOS_CPU_DMA_LATENCY,
4844 PM_QOS_DEFAULT_VALUE);
4845 }
4846
4847 device->events_wq = alloc_workqueue("kgsl-events",
4848 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4849
4850 /* Initialize the snapshot engine */
4851 kgsl_device_snapshot_init(device);
4852
4853 /* Initialize common sysfs entries */
4854 kgsl_pwrctrl_init_sysfs(device);
4855
4856 return 0;
4857
Shrenuj Bansala419c792016-10-20 14:05:11 -07004858error_close_mmu:
4859 kgsl_mmu_close(device);
4860error_pwrctrl_close:
4861 kgsl_pwrctrl_close(device);
4862error:
Lynus Vaz519dacfd2017-02-14 12:17:37 +05304863 kgsl_device_debugfs_close(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004864 _unregister_device(device);
4865 return status;
4866}
4867EXPORT_SYMBOL(kgsl_device_platform_probe);
4868
4869void kgsl_device_platform_remove(struct kgsl_device *device)
4870{
4871 destroy_workqueue(device->events_wq);
4872
4873 kgsl_device_snapshot_close(device);
4874
4875 kgsl_exit_page_pools();
4876
4877 kgsl_pwrctrl_uninit_sysfs(device);
4878
4879 pm_qos_remove_request(&device->pwrctrl.pm_qos_req_dma);
4880 if (device->pwrctrl.l2pc_cpus_mask)
4881 pm_qos_remove_request(&device->pwrctrl.l2pc_cpus_qos);
4882
4883 idr_destroy(&device->context_idr);
4884
Shrenuj Bansala419c792016-10-20 14:05:11 -07004885 kgsl_free_global(device, &device->memstore);
4886
4887 kgsl_mmu_close(device);
4888
4889 kgsl_pwrctrl_close(device);
4890
Lynus Vaz519dacfd2017-02-14 12:17:37 +05304891 kgsl_device_debugfs_close(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004892 _unregister_device(device);
4893}
4894EXPORT_SYMBOL(kgsl_device_platform_remove);
4895
4896static void kgsl_core_exit(void)
4897{
4898 kgsl_events_exit();
4899 kgsl_core_debugfs_close();
4900
4901 /*
4902 * We call kgsl_sharedmem_uninit_sysfs() and device_unregister()
4903 * only if kgsl_driver.virtdev has been populated.
4904 * We check at least one member of kgsl_driver.virtdev to
4905 * see if it is not NULL (and thus, has been populated).
4906 */
4907 if (kgsl_driver.virtdev.class) {
4908 kgsl_sharedmem_uninit_sysfs();
4909 device_unregister(&kgsl_driver.virtdev);
4910 }
4911
4912 if (kgsl_driver.class) {
4913 class_destroy(kgsl_driver.class);
4914 kgsl_driver.class = NULL;
4915 }
4916
Tarun Karra2b8b3632016-11-14 16:38:27 -08004917 kgsl_drawobjs_cache_exit();
Shrenuj Bansala419c792016-10-20 14:05:11 -07004918
4919 kgsl_memfree_exit();
4920 unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
4921}
4922
4923static int __init kgsl_core_init(void)
4924{
4925 int result = 0;
Tim Murray85040432017-02-20 15:59:32 +05304926 struct sched_param param = { .sched_priority = 2 };
4927
Shrenuj Bansala419c792016-10-20 14:05:11 -07004928 /* alloc major and minor device numbers */
4929 result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
4930 "kgsl");
4931
4932 if (result < 0) {
4933
4934 KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
4935 goto err;
4936 }
4937
4938 cdev_init(&kgsl_driver.cdev, &kgsl_fops);
4939 kgsl_driver.cdev.owner = THIS_MODULE;
4940 kgsl_driver.cdev.ops = &kgsl_fops;
4941 result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
4942 KGSL_DEVICE_MAX);
4943
4944 if (result) {
4945 KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d, result= %d\n",
4946 kgsl_driver.major, result);
4947 goto err;
4948 }
4949
4950 kgsl_driver.class = class_create(THIS_MODULE, "kgsl");
4951
4952 if (IS_ERR(kgsl_driver.class)) {
4953 result = PTR_ERR(kgsl_driver.class);
4954 KGSL_CORE_ERR("failed to create class for kgsl");
4955 goto err;
4956 }
4957
4958 /*
4959 * Make a virtual device for managing core related things
4960 * in sysfs
4961 */
4962 kgsl_driver.virtdev.class = kgsl_driver.class;
4963 dev_set_name(&kgsl_driver.virtdev, "kgsl");
4964 result = device_register(&kgsl_driver.virtdev);
4965 if (result) {
4966		KGSL_CORE_ERR("device_register failed\n");
4967 goto err;
4968 }
4969
4970 /* Make kobjects in the virtual device for storing statistics */
4971
4972 kgsl_driver.ptkobj =
4973 kobject_create_and_add("pagetables",
4974 &kgsl_driver.virtdev.kobj);
4975
4976 kgsl_driver.prockobj =
4977 kobject_create_and_add("proc",
4978 &kgsl_driver.virtdev.kobj);
4979
4980 kgsl_core_debugfs_init();
4981
4982 kgsl_sharedmem_init_sysfs();
4983
4984 INIT_LIST_HEAD(&kgsl_driver.process_list);
4985
4986 INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
4987
4988 kgsl_driver.workqueue = alloc_workqueue("kgsl-workqueue",
4989 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4990
4991 kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
Hareesh Gundu615439d2017-06-16 17:06:57 +05304992 WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004993
Tim Murray85040432017-02-20 15:59:32 +05304994 kthread_init_worker(&kgsl_driver.worker);
4995
4996 kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
4997 &kgsl_driver.worker, "kgsl_worker_thread");
4998
4999 if (IS_ERR(kgsl_driver.worker_thread)) {
5000 pr_err("unable to start kgsl thread\n");
5001 goto err;
5002 }
5003
5004 sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
5005
Shrenuj Bansala419c792016-10-20 14:05:11 -07005006 kgsl_events_init();
5007
Tarun Karra2b8b3632016-11-14 16:38:27 -08005008 result = kgsl_drawobjs_cache_init();
Shrenuj Bansala419c792016-10-20 14:05:11 -07005009 if (result)
5010 goto err;
5011
5012 kgsl_memfree_init();
5013
5014 return 0;
5015
5016err:
5017 kgsl_core_exit();
5018 return result;
5019}
5020
5021module_init(kgsl_core_init);
5022module_exit(kgsl_core_exit);
5023
5024MODULE_DESCRIPTION("MSM GPU driver");
5025MODULE_LICENSE("GPL");