/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-buf.h>
#include <linux/pm_runtime.h>
#include <linux/rbtree.h>
#include <linux/major.h>
#include <linux/io.h>
#include <linux/mman.h>
#include <linux/sort.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
#include "kgsl_compat.h"
#include "kgsl_pool.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif

#ifndef pgprot_writebackcache
#define pgprot_writebackcache(_prot) (_prot)
#endif

#ifndef pgprot_writethroughcache
#define pgprot_writethroughcache(_prot) (_prot)
#endif

#ifdef CONFIG_ARM_LPAE
#define KGSL_DMA_BIT_MASK DMA_BIT_MASK(64)
#else
#define KGSL_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

static char *kgsl_mmu_type;
module_param_named(mmutype, kgsl_mmu_type, charp, 0000);
MODULE_PARM_DESC(kgsl_mmu_type, "Type of MMU to be used for graphics");

/* Mutex used for the IOMMU sync quirk */
DEFINE_MUTEX(kgsl_mmu_sync);
EXPORT_SYMBOL(kgsl_mmu_sync);

struct kgsl_dma_buf_meta {
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *table;
};

static inline struct kgsl_pagetable *_get_memdesc_pagetable(
		struct kgsl_pagetable *pt, struct kgsl_mem_entry *entry)
{
	/* if a secured buffer, map it to secure global pagetable */
	if (kgsl_memdesc_is_secured(&entry->memdesc))
		return pt->mmu->securepagetable;

	return pt;
}

static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);

static const struct file_operations kgsl_fops;

/*
 * The memfree list contains the last N blocks of memory that have been freed.
 * On a GPU fault we walk the list to see if the faulting address had been
 * recently freed and print out a message to that effect
 */

#define MEMFREE_ENTRIES 512

static DEFINE_SPINLOCK(memfree_lock);

struct memfree_entry {
	pid_t ptname;
	uint64_t gpuaddr;
	uint64_t size;
	pid_t pid;
	uint64_t flags;
};

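/*
 * Ring buffer of recently freed allocations, protected by memfree_lock.
 * head is the next slot to write; when it catches up to tail the oldest
 * entry is overwritten.
 */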
static struct {
	struct memfree_entry *list;
	int head;
	int tail;
} memfree;

static int kgsl_memfree_init(void)
{
	memfree.list = kcalloc(MEMFREE_ENTRIES, sizeof(struct memfree_entry),
		GFP_KERNEL);

	return (memfree.list) ? 0 : -ENOMEM;
}

static void kgsl_memfree_exit(void)
{
	kfree(memfree.list);
	memset(&memfree, 0, sizeof(memfree));
}

static inline bool match_memfree_addr(struct memfree_entry *entry,
		pid_t ptname, uint64_t gpuaddr)
{
	return ((entry->ptname == ptname) &&
		(entry->size > 0) &&
		(gpuaddr >= entry->gpuaddr &&
		 gpuaddr < (entry->gpuaddr + entry->size)));
}
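/*
 * Search the memfree ring buffer, newest entry first, for a freed block in
 * pagetable @ptname that contained *@gpuaddr. On a hit the entry's address,
 * size, flags and owning pid are copied to the out parameters and 1 is
 * returned; otherwise 0 is returned.
 */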
int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
	uint64_t *size, uint64_t *flags, pid_t *pid)
{
	int ptr;

	if (memfree.list == NULL)
		return 0;

	spin_lock(&memfree_lock);

	ptr = memfree.head - 1;
	if (ptr < 0)
		ptr = MEMFREE_ENTRIES - 1;

	/* Walk backwards through the list looking for the last match */
	while (ptr != memfree.tail) {
		struct memfree_entry *entry = &memfree.list[ptr];

		if (match_memfree_addr(entry, ptname, *gpuaddr)) {
			*gpuaddr = entry->gpuaddr;
			*flags = entry->flags;
			*size = entry->size;
			*pid = entry->pid;

			spin_unlock(&memfree_lock);
			return 1;
		}

		ptr = ptr - 1;

		if (ptr < 0)
			ptr = MEMFREE_ENTRIES - 1;
	}

	spin_unlock(&memfree_lock);
	return 0;
}

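/*
 * Trim or drop any memfree entries in @pagetable that overlap the range
 * being (re)allocated so stale "recently freed" hits are not reported for
 * addresses that are live again.
 */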
static void kgsl_memfree_purge(struct kgsl_pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	pid_t ptname = pagetable ? pagetable->name : 0;
	int i;

	if (memfree.list == NULL)
		return;

	spin_lock(&memfree_lock);

	for (i = 0; i < MEMFREE_ENTRIES; i++) {
		struct memfree_entry *entry = &memfree.list[i];

		if (entry->ptname != ptname || entry->size == 0)
			continue;

		if (gpuaddr > entry->gpuaddr &&
			gpuaddr < entry->gpuaddr + entry->size) {
			/* truncate the end of the entry */
			entry->size = gpuaddr - entry->gpuaddr;
		} else if (gpuaddr <= entry->gpuaddr) {
			if (gpuaddr + size > entry->gpuaddr &&
				gpuaddr + size < entry->gpuaddr + entry->size)
				/* Truncate the beginning of the entry */
				entry->gpuaddr = gpuaddr + size;
			else if (gpuaddr + size >= entry->gpuaddr + entry->size)
				/* Remove the entire entry */
				entry->size = 0;
		}
	}
	spin_unlock(&memfree_lock);
}

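/*
 * Record a freed allocation in the memfree ring buffer, overwriting the
 * oldest entry once the buffer wraps around.
 */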
static void kgsl_memfree_add(pid_t pid, pid_t ptname, uint64_t gpuaddr,
		uint64_t size, uint64_t flags)

{
	struct memfree_entry *entry;

	if (memfree.list == NULL)
		return;

	spin_lock(&memfree_lock);

	entry = &memfree.list[memfree.head];

	entry->pid = pid;
	entry->ptname = ptname;
	entry->gpuaddr = gpuaddr;
	entry->size = size;
	entry->flags = flags;

	memfree.head = (memfree.head + 1) % MEMFREE_ENTRIES;

	if (memfree.head == memfree.tail)
		memfree.tail = (memfree.tail + 1) % MEMFREE_ENTRIES;

	spin_unlock(&memfree_lock);
}

int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
		enum kgsl_timestamp_type type, unsigned int *timestamp)
{
	return device->ftbl->readtimestamp(device, priv, type, timestamp);
}
EXPORT_SYMBOL(kgsl_readtimestamp);

static long gpumem_free_entry(struct kgsl_mem_entry *entry);

/* Scheduled by kgsl_mem_entry_put_deferred() */
static void _deferred_put(struct work_struct *work)
{
	struct kgsl_mem_entry *entry =
		container_of(work, struct kgsl_mem_entry, work);

	kgsl_mem_entry_put(entry);
}

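/*
 * Allocate a zeroed mem entry holding two references: the base kref plus an
 * extra one that the allocating ioctl hands back to userspace.
 */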
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (entry != NULL) {
		kref_init(&entry->refcount);
		/* put this ref in userspace memory alloc and map ioctls */
		kref_get(&entry->refcount);
	}

	return entry;
}
#ifdef CONFIG_DMA_SHARED_BUFFER
static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
{
	if (meta != NULL) {
		dma_buf_unmap_attachment(meta->attach, meta->table,
			DMA_FROM_DEVICE);
		dma_buf_detach(meta->dmabuf, meta->attach);
		dma_buf_put(meta->dmabuf);
		kfree(meta);
	}
}
#else
static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
{

}
#endif

void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						struct kgsl_mem_entry,
						refcount);
	unsigned int memtype;

	if (entry == NULL)
		return;

	/* pull out the memtype before the flags get cleared */
	memtype = kgsl_memdesc_usermem_type(&entry->memdesc);

	/* Detach from process list */
	kgsl_mem_entry_detach_process(entry);

	if (memtype != KGSL_MEM_ENTRY_KERNEL)
		atomic_long_sub(entry->memdesc.size,
			&kgsl_driver.stats.mapped);

	/*
	 * Ion takes care of freeing the sg_table for us so
	 * clear the sg table before freeing the sharedmem
	 * so kgsl_sharedmem_free doesn't try to free it again
	 */
	if (memtype == KGSL_MEM_ENTRY_ION)
		entry->memdesc.sgt = NULL;

	if ((memtype == KGSL_MEM_ENTRY_USER)
		&& !(entry->memdesc.flags & KGSL_MEMFLAGS_GPUREADONLY)) {
		int i = 0, j;
		struct scatterlist *sg;
		struct page *page;
		/*
		 * Mark all of the pages in the scatterlist as dirty since
		 * they were writable by the GPU.
		 */
		for_each_sg(entry->memdesc.sgt->sgl, sg,
			    entry->memdesc.sgt->nents, i) {
			page = sg_page(sg);
			for (j = 0; j < (sg->length >> PAGE_SHIFT); j++)
				set_page_dirty(nth_page(page, j));
		}
	}

	kgsl_sharedmem_free(&entry->memdesc);

	switch (memtype) {
	case KGSL_MEM_ENTRY_ION:
		kgsl_destroy_ion(entry->priv_data);
		break;
	default:
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);

/* Allocate an IOVA for memory objects that don't use SVM */
static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
		struct kgsl_process_private *process,
		struct kgsl_mem_entry *entry)
{
	struct kgsl_pagetable *pagetable;

	/*
	 * If SVM is enabled for this object then the address needs to be
	 * assigned elsewhere
	 * Also do not proceed further in case of NoMMU.
	 */
	if (kgsl_memdesc_use_cpu_map(&entry->memdesc) ||
		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
		return 0;

	pagetable = kgsl_memdesc_is_secured(&entry->memdesc) ?
		device->mmu.securepagetable : process->pagetable;

	return kgsl_mmu_get_gpuaddr(pagetable, &entry->memdesc);
}

/* Commit the entry to the process so it can be accessed by other operations */
static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
{
	if (!entry)
		return;

	spin_lock(&entry->priv->mem_lock);
	idr_replace(&entry->priv->mem_idr, entry, entry->id);
	spin_unlock(&entry->priv->mem_lock);
}

/*
 * Attach the memory object to a process by (possibly) getting a GPU address and
 * (possibly) mapping it
 */
static int kgsl_mem_entry_attach_process(struct kgsl_device *device,
		struct kgsl_process_private *process,
		struct kgsl_mem_entry *entry)
{
	int id, ret;

	ret = kgsl_process_private_get(process);
	if (!ret)
		return -EBADF;

	ret = kgsl_mem_entry_track_gpuaddr(device, process, entry);
	if (ret) {
		kgsl_process_private_put(process);
		return ret;
	}

	idr_preload(GFP_KERNEL);
	spin_lock(&process->mem_lock);
	/* Allocate the ID but don't attach the pointer just yet */
	id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&process->mem_lock);
	idr_preload_end();

	if (id < 0) {
		if (!kgsl_memdesc_use_cpu_map(&entry->memdesc))
			kgsl_mmu_put_gpuaddr(&entry->memdesc);
		kgsl_process_private_put(process);
		return id;
	}

	entry->id = id;
	entry->priv = process;

	/*
	 * Map the memory if a GPU address is already assigned, either through
	 * kgsl_mem_entry_track_gpuaddr() or via some other SVM process
	 */
	if (entry->memdesc.gpuaddr) {
		if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT)
			ret = kgsl_mmu_sparse_dummy_map(
				entry->memdesc.pagetable,
				&entry->memdesc, 0,
				entry->memdesc.size);
		else if (entry->memdesc.gpuaddr)
			ret = kgsl_mmu_map(entry->memdesc.pagetable,
				&entry->memdesc);

		if (ret)
			kgsl_mem_entry_detach_process(entry);
	}

	kgsl_memfree_purge(entry->memdesc.pagetable, entry->memdesc.gpuaddr,
		entry->memdesc.size);

	return ret;
}

/* Detach a memory entry from a process and unmap it from the MMU */
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
	unsigned int type;

	if (entry == NULL)
		return;

	/*
	 * First remove the entry from mem_idr list
	 * so that no one can operate on obsolete values
	 */
	spin_lock(&entry->priv->mem_lock);
	if (entry->id != 0)
		idr_remove(&entry->priv->mem_idr, entry->id);
	entry->id = 0;

	type = kgsl_memdesc_usermem_type(&entry->memdesc);
	entry->priv->stats[type].cur -= entry->memdesc.size;
	spin_unlock(&entry->priv->mem_lock);

	kgsl_mmu_put_gpuaddr(&entry->memdesc);

	kgsl_process_private_put(entry->priv);

	entry->priv = NULL;
}

/**
 * kgsl_context_dump() - dump information about a draw context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks
 */
void kgsl_context_dump(struct kgsl_context *context)
{
	struct kgsl_device *device;

	if (_kgsl_context_get(context) == 0)
		return;

	device = context->device;

	if (kgsl_context_detached(context)) {
		dev_err(device->dev, " context[%d]: context detached\n",
			context->id);
	} else if (device->ftbl->drawctxt_dump != NULL)
		device->ftbl->drawctxt_dump(device, context);

	kgsl_context_put(context);
}
EXPORT_SYMBOL(kgsl_context_dump);

/* Allocate a new context ID */
static int _kgsl_get_context_id(struct kgsl_device *device)
{
	int id;

	idr_preload(GFP_KERNEL);
	write_lock(&device->context_lock);
	/* Allocate the slot but don't put a pointer in it yet */
	id = idr_alloc(&device->context_idr, NULL, 1,
		KGSL_MEMSTORE_MAX, GFP_NOWAIT);
	write_unlock(&device->context_lock);
	idr_preload_end();

	return id;
}

/**
 * kgsl_context_init() - helper to initialize kgsl_context members
 * @dev_priv: the owner of the context
 * @context: the newly created context struct, should be allocated by
 * the device specific drawctxt_create function.
 *
 * This is a helper function for the device specific drawctxt_create
 * function to initialize the common members of its context struct.
 * If this function succeeds, reference counting is active in the context
 * struct and the caller should kgsl_context_put() it on error.
 * If it fails, the caller should just free the context structure
 * it passed in.
 */
int kgsl_context_init(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context)
{
	struct kgsl_device *device = dev_priv->device;
	char name[64];
	int ret = 0, id;

	id = _kgsl_get_context_id(device);
	if (id == -ENOSPC) {
		/*
		 * Before declaring that there are no contexts left try
		 * flushing the event workqueue just in case there are
		 * detached contexts waiting to finish
		 */

		flush_workqueue(device->events_wq);
		id = _kgsl_get_context_id(device);
	}

	if (id < 0) {
		if (id == -ENOSPC)
			KGSL_DRV_INFO(device,
				"cannot have more than %zu contexts due to memstore limitation\n",
				KGSL_MEMSTORE_MAX);

		return id;
	}

	context->id = id;

	kref_init(&context->refcount);
	/*
	 * Get a reference to the process private so it's not destroyed until
	 * the context is destroyed. This will also prevent the pagetable
	 * from being destroyed
	 */
	if (!kgsl_process_private_get(dev_priv->process_priv)) {
		ret = -EBADF;
		goto out;
	}
	context->device = dev_priv->device;
	context->dev_priv = dev_priv;
	context->proc_priv = dev_priv->process_priv;
	context->tid = task_pid_nr(current);

	ret = kgsl_sync_timeline_create(context);
	if (ret)
		goto out;

	snprintf(name, sizeof(name), "context-%d", id);
	kgsl_add_event_group(&context->events, context, name,
		kgsl_readtimestamp, context);

out:
	if (ret) {
		write_lock(&device->context_lock);
		idr_remove(&dev_priv->device->context_idr, id);
		write_unlock(&device->context_lock);
	}

	return ret;
}
EXPORT_SYMBOL(kgsl_context_init);

/**
 * kgsl_context_detach() - Release the "master" context reference
 * @context: The context that will be detached
 *
 * This is called when a context becomes unusable, because userspace
 * has requested for it to be destroyed. The context itself may
 * exist a bit longer until its reference count goes to zero.
 * Other code referencing the context can detect that it has been
 * detached by checking the KGSL_CONTEXT_PRIV_DETACHED bit in
 * context->priv.
 */
static void kgsl_context_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;

	if (context == NULL)
		return;

	/*
	 * Mark the context as detached to keep others from using
	 * the context before it gets fully removed, and to make sure
	 * we don't try to detach twice.
	 */
	if (test_and_set_bit(KGSL_CONTEXT_PRIV_DETACHED, &context->priv))
		return;

	device = context->device;

	trace_kgsl_context_detach(device, context);

	context->device->ftbl->drawctxt_detach(context);

	/*
	 * Cancel all pending events after the device-specific context is
	 * detached, to avoid possibly freeing memory while it is still
	 * in use by the GPU.
	 */
	kgsl_cancel_events(device, &context->events);

	/* Remove the event group from the list */
	kgsl_del_event_group(&context->events);

	kgsl_sync_timeline_put(context->ktimeline);

	kgsl_context_put(context);
}

void
kgsl_context_destroy(struct kref *kref)
{
	struct kgsl_context *context = container_of(kref, struct kgsl_context,
						refcount);
	struct kgsl_device *device = context->device;

	trace_kgsl_context_destroy(device, context);

	/*
	 * It's not safe to destroy the context if it's not detached as GPU
	 * may still be executing commands
	 */
	BUG_ON(!kgsl_context_detached(context));

	write_lock(&device->context_lock);
	if (context->id != KGSL_CONTEXT_INVALID) {

		/* Clear the timestamps in the memstore during destroy */
		kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), 0);
		kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), 0);

		/* clear device power constraint */
		if (context->id == device->pwrctrl.constraint.owner_id) {
			trace_kgsl_constraint(device,
				device->pwrctrl.constraint.type,
				device->pwrctrl.active_pwrlevel,
				0);
			device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
		}

		idr_remove(&device->context_idr, context->id);
		context->id = KGSL_CONTEXT_INVALID;
	}
	write_unlock(&device->context_lock);
	kgsl_sync_timeline_destroy(context);
	kgsl_process_private_put(context->proc_priv);

	device->ftbl->drawctxt_destroy(context);
}

struct kgsl_device *kgsl_get_device(int dev_idx)
{
	int i;
	struct kgsl_device *ret = NULL;

	mutex_lock(&kgsl_driver.devlock);

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
			ret = kgsl_driver.devp[i];
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);
	return ret;
}
EXPORT_SYMBOL(kgsl_get_device);

static struct kgsl_device *kgsl_get_minor(int minor)
{
	struct kgsl_device *ret = NULL;

	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
		return NULL;

	mutex_lock(&kgsl_driver.devlock);
	ret = kgsl_driver.devp[minor];
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}

/**
 * kgsl_check_timestamp() - return true if the specified timestamp is retired
 * @device: Pointer to the KGSL device to check
 * @context: Pointer to the context for the timestamp
 * @timestamp: The timestamp to compare
 */
int kgsl_check_timestamp(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int timestamp)
{
	unsigned int ts_processed;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&ts_processed);

	return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);

static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
	mutex_unlock(&device->mutex);

	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}

static int kgsl_resume_device(struct kgsl_device *device)
{
	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
	} else if (device->state != KGSL_STATE_INIT) {
		/*
		 * This is an error situation, so wait for the device
		 * to idle and then put the device to SLUMBER state.
		 * This will put the device to the right state when
		 * we resume.
		 */
		if (device->state == KGSL_STATE_ACTIVE)
			device->ftbl->idle(device);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
		KGSL_PWR_ERR(device,
			"resume invoked without a suspend\n");
	}

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return 0;
}

static int kgsl_suspend(struct device *dev)
{

	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);

	return kgsl_suspend_device(device, arg);
}

static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);

	return kgsl_resume_device(device);
}

static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}

static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}

const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);

int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);

	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);

int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);

	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);

/**
 * kgsl_destroy_process_private() - Cleanup function to free process private
 * @kref: Pointer to the kref member of the process private being destroyed
 *
 * Free the struct object and all other resources attached to it. Since the
 * function can be called before all resources inside the process private
 * have been allocated, each resource is checked before cleanup and is only
 * freed if it was actually allocated.
 */
static void kgsl_destroy_process_private(struct kref *kref)
{
	struct kgsl_process_private *private = container_of(kref,
			struct kgsl_process_private, refcount);

	idr_destroy(&private->mem_idr);
	idr_destroy(&private->syncsource_idr);

	/* When using global pagetables, do not detach global pagetable */
	if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
		kgsl_mmu_putpagetable(private->pagetable);

	kfree(private);
}

void
kgsl_process_private_put(struct kgsl_process_private *private)
{
	if (private)
		kref_put(&private->refcount, kgsl_destroy_process_private);
}

/**
 * kgsl_process_private_find() - Find the process associated with the
 * specified pid
 * @pid: pid_t of the process to search for
 * Return the process struct for the given ID.
 */
struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
{
	struct kgsl_process_private *p, *private = NULL;

	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(p, &kgsl_driver.process_list, list) {
		if (p->pid == pid) {
			if (kgsl_process_private_get(p))
				private = p;
			break;
		}
	}
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

static struct kgsl_process_private *kgsl_process_private_new(
		struct kgsl_device *device)
{
	struct kgsl_process_private *private;
	pid_t tgid = task_tgid_nr(current);

	/* Search in the process list */
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == tgid) {
			if (!kgsl_process_private_get(private))
				private = ERR_PTR(-EINVAL);
			return private;
		}
	}

	/* Create a new object */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&private->refcount);

	private->pid = tgid;
	get_task_comm(private->comm, current->group_leader);

	spin_lock_init(&private->mem_lock);
	spin_lock_init(&private->syncsource_lock);

	idr_init(&private->mem_idr);
	idr_init(&private->syncsource_idr);

	/* Allocate a pagetable for the new process object */
	private->pagetable = kgsl_mmu_getpagetable(&device->mmu, tgid);
	if (IS_ERR(private->pagetable)) {
		int err = PTR_ERR(private->pagetable);

		idr_destroy(&private->mem_idr);
		idr_destroy(&private->syncsource_idr);

		kfree(private);
		private = ERR_PTR(err);
	}

	return private;
}

static void process_release_memory(struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry;
	int next = 0;

	while (1) {
		spin_lock(&private->mem_lock);
		entry = idr_get_next(&private->mem_idr, &next);
		if (entry == NULL) {
			spin_unlock(&private->mem_lock);
			break;
		}
		/*
		 * If the free pending flag is not set it means that user space
		 * did not free its reference to this entry; in that case free
		 * a reference to this entry here. Other references are from
		 * within kgsl so they will be freed eventually by kgsl.
		 */
		if (!entry->pending_free) {
			entry->pending_free = 1;
			spin_unlock(&private->mem_lock);
			kgsl_mem_entry_put(entry);
		} else {
			spin_unlock(&private->mem_lock);
		}
		next = next + 1;
	}
}

static void process_release_sync_sources(struct kgsl_process_private *private)
{
	struct kgsl_syncsource *syncsource;
	int next = 0;

	while (1) {
		spin_lock(&private->syncsource_lock);
		syncsource = idr_get_next(&private->syncsource_idr, &next);
		spin_unlock(&private->syncsource_lock);

		if (syncsource == NULL)
			break;

		kgsl_syncsource_cleanup(private, syncsource);
		next = next + 1;
	}
}

static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
		struct kgsl_process_private *private)
{
	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->fd_count > 0) {
		mutex_unlock(&kgsl_driver.process_mutex);
		kgsl_process_private_put(private);
		return;
	}

	/*
	 * If this is the last file on the process take down the debug
	 * directories and garbage collect any outstanding resources
	 */

	kgsl_process_uninit_sysfs(private);
	debugfs_remove_recursive(private->debug_root);

	process_release_sync_sources(private);

	/* When using global pagetables, do not detach global pagetable */
	if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
		kgsl_mmu_detach_pagetable(private->pagetable);

	/* Remove the process struct from the master list */
	list_del(&private->list);

	/*
	 * Unlock the mutex before releasing the memory - this prevents a
	 * deadlock with the IOMMU mutex if a page fault occurs
	 */
	mutex_unlock(&kgsl_driver.process_mutex);

	process_release_memory(private);

	kgsl_process_private_put(private);
}


static struct kgsl_process_private *kgsl_process_private_open(
		struct kgsl_device *device)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	private = kgsl_process_private_new(device);

	if (IS_ERR(private))
		goto done;

	/*
	 * If this is a new process create the debug directories and add it to
	 * the process list
	 */

	if (private->fd_count++ == 0) {
		kgsl_process_init_sysfs(device, private);
		kgsl_process_init_debugfs(private);

		list_add(&private->list, &kgsl_driver.process_list);
	}

done:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

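/*
 * Drop one open count on the device; when the last file descriptor closes,
 * wait for the active count to reach zero and move the device to INIT.
 */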
static int kgsl_close_device(struct kgsl_device *device)
{
	int result = 0;

	mutex_lock(&device->mutex);
	device->open_count--;
	if (device->open_count == 0) {

		/* Wait for the active count to go to 0 */
		kgsl_active_count_wait(device, 0);

		/* Fail if the wait times out */
		BUG_ON(atomic_read(&device->active_cnt) > 0);

		result = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
	}
	mutex_unlock(&device->mutex);
	return result;

}

static void device_release_contexts(struct kgsl_device_private *dev_priv)
{
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;
	int result = 0;

	while (1) {
		read_lock(&device->context_lock);
		context = idr_get_next(&device->context_idr, &next);

		if (context == NULL) {
			read_unlock(&device->context_lock);
			break;
		} else if (context->dev_priv == dev_priv) {
			/*
			 * Hold a reference to the context in case somebody
			 * tries to put it while we are detaching
			 */
			result = _kgsl_context_get(context);
		}
		read_unlock(&device->context_lock);

		if (result) {
			kgsl_context_detach(context);
			kgsl_context_put(context);
			result = 0;
		}

		next = next + 1;
	}
}

static int kgsl_release(struct inode *inodep, struct file *filep)
{
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_device *device = dev_priv->device;
	int result;

	filep->private_data = NULL;

	/* Release the contexts for the file */
	device_release_contexts(dev_priv);

	/* Close down the process wide resources for the file */
	kgsl_process_private_close(dev_priv, dev_priv->process_priv);

	kfree(dev_priv);

	result = kgsl_close_device(device);
	pm_runtime_put(&device->pdev->dev);

	return result;
}

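/*
 * First open of the device: clear the memstore and scratch buffers, run the
 * hardware init and start sequence, and leave the device in ACTIVE state.
 * Subsequent opens only bump the open count.
 */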
static int kgsl_open_device(struct kgsl_device *device)
{
	int result = 0;

	mutex_lock(&device->mutex);
	if (device->open_count == 0) {
		/*
		 * active_cnt special case: we are starting up for the first
		 * time, so use this sequence instead of the kgsl_pwrctrl_wake()
		 * which will be called by kgsl_active_count_get().
		 */
		atomic_inc(&device->active_cnt);
		kgsl_sharedmem_set(device, &device->memstore, 0, 0,
				device->memstore.size);
		kgsl_sharedmem_set(device, &device->scratch, 0, 0,
				device->scratch.size);

		result = device->ftbl->init(device);
		if (result)
			goto err;

		result = device->ftbl->start(device, 0);
		if (result)
			goto err;
		/*
		 * Make sure the gates are open, so they don't block until
		 * we start suspend or FT.
		 */
		complete_all(&device->hwaccess_gate);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
		kgsl_active_count_put(device);
	}
	device->open_count++;
err:
	if (result) {
		kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
		atomic_dec(&device->active_cnt);
	}

	mutex_unlock(&device->mutex);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	result = pm_runtime_get_sync(&device->pdev->dev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
	if (dev_priv == NULL) {
		result = -ENOMEM;
		goto err;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	result = kgsl_open_device(device);
	if (result)
		goto err;

	/*
	 * Get file (per process) private struct. This must be done
	 * after the first start so that the global pagetable mappings
	 * are set up before we create the per-process pagetable.
	 */
	dev_priv->process_priv = kgsl_process_private_open(device);
	if (IS_ERR(dev_priv->process_priv)) {
		result = PTR_ERR(dev_priv->process_priv);
		kgsl_close_device(device);
		goto err;
	}

err:
	if (result) {
		filep->private_data = NULL;
		kfree(dev_priv);
		pm_runtime_put(&device->pdev->dev);
	}
	return result;
}

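/* True if _val falls inside the GPU address range covered by _memdesc */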
#define GPUADDR_IN_MEMDESC(_val, _memdesc) \
	(((_val) >= (_memdesc)->gpuaddr) && \
	 ((_val) < ((_memdesc)->gpuaddr + (_memdesc)->size)))

/**
 * kgsl_sharedmem_find() - Find a gpu memory allocation
 *
 * @private: private data for the process to check.
 * @gpuaddr: start address of the region
 *
 * Find a gpu allocation. Caller must kgsl_mem_entry_put()
 * the returned entry when finished using it.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
{
	int ret = 0, id;
	struct kgsl_mem_entry *entry = NULL;

	if (!private)
		return NULL;

	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr))
		return NULL;

	spin_lock(&private->mem_lock);
	idr_for_each_entry(&private->mem_idr, entry, id) {
		if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
			if (!entry->pending_free)
				ret = kgsl_mem_entry_get(entry);
			break;
		}
	}
	spin_unlock(&private->mem_lock);

	return (ret == 0) ? NULL : entry;
}
EXPORT_SYMBOL(kgsl_sharedmem_find);

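/**
 * kgsl_sharedmem_find_id_flags() - find a memory entry by id, requiring that
 * all of the given flags are set on it
 * @process: the owning process
 * @id: id to find
 * @flags: flags that must all be set on the entry's memdesc
 *
 * Caller must kgsl_mem_entry_put() the returned entry when finished using it.
 */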
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id_flags(struct kgsl_process_private *process,
		unsigned int id, uint64_t flags)
{
	int count = 0;
	struct kgsl_mem_entry *entry;

	spin_lock(&process->mem_lock);
	entry = idr_find(&process->mem_idr, id);
	if (entry)
		if (!entry->pending_free &&
				(flags & entry->memdesc.flags) == flags)
			count = kgsl_mem_entry_get(entry);
	spin_unlock(&process->mem_lock);

	return (count == 0) ? NULL : entry;
}

/**
 * kgsl_sharedmem_find_id() - find a memory entry by id
 * @process: the owning process
 * @id: id to find
 *
 * @returns - the mem_entry or NULL
 *
 * Caller must kgsl_mem_entry_put() the returned entry, when finished using
 * it.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
{
	return kgsl_sharedmem_find_id_flags(process, id, 0);
}

/**
 * kgsl_mem_entry_unset_pend() - Unset the pending free flag of an entry
 * @entry - The memory entry
 */
static inline void kgsl_mem_entry_unset_pend(struct kgsl_mem_entry *entry)
{
	if (entry == NULL)
		return;
	spin_lock(&entry->priv->mem_lock);
	entry->pending_free = 0;
	spin_unlock(&entry->priv->mem_lock);
}

/**
 * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
 * @entry - The memory entry
 *
 * @returns - true if pending flag was 0 else false
 *
 * This function will set the pending free flag if it was previously unset.
 * Used to prevent a race condition between ioctls calling free or
 * freememontimestamp on the same entry; whichever thread sets the flag first
 * will do the free.
 */
static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry)
{
	bool ret = false;

	if (entry == NULL)
		return false;

	spin_lock(&entry->priv->mem_lock);
	if (!entry->pending_free) {
		entry->pending_free = 1;
		ret = true;
	}
	spin_unlock(&entry->priv->mem_lock);
	return ret;
}

/* Call all ioctl sub functions with the driver locked */
long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;

		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_context_get_owner(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
			sizeof(unsigned int)))
			result = -EFAULT;
		else {
			/* Clear reset status once it's been queried */
			context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		}

		kgsl_context_put(context);
		break;
	}
	default:
		if (is_compat_task())
			result = dev_priv->device->ftbl->getproperty_compat(
					dev_priv->device, param->type,
					param->value, param->sizebytes);
		else
			result = dev_priv->device->ftbl->getproperty(
					dev_priv->device, param->type,
					param->value, param->sizebytes);
	}


	return result;
}

long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	/* The getproperty struct is reused for setproperty too */
	struct kgsl_device_getproperty *param = data;

	/* Reroute to compat version if coming from compat_ioctl */
	if (is_compat_task())
		result = dev_priv->device->ftbl->setproperty_compat(
			dev_priv, param->type, param->value,
			param->sizebytes);
	else if (dev_priv->device->ftbl->setproperty)
		result = dev_priv->device->ftbl->setproperty(
			dev_priv, param->type, param->value,
			param->sizebytes);

	return result;
}

long kgsl_ioctl_device_waittimestamp_ctxtid(
		struct kgsl_device_private *dev_priv, unsigned int cmd,
		void *data)
{
	struct kgsl_device_waittimestamp_ctxtid *param = data;
	struct kgsl_device *device = dev_priv->device;
	long result = -EINVAL;
	unsigned int temp_cur_ts = 0;
	struct kgsl_context *context;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return result;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&temp_cur_ts);

	trace_kgsl_waittimestamp_entry(device, context->id, temp_cur_ts,
		param->timestamp, param->timeout);

	result = device->ftbl->waittimestamp(device, context, param->timestamp,
		param->timeout);

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&temp_cur_ts);
	trace_kgsl_waittimestamp_exit(device, temp_cur_ts, result);

	kgsl_context_put(context);

	return result;
}

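/*
 * Return true if the context was created as sparse or the submission flags
 * mark it sparse; the command submission ioctls below reject such
 * submissions.
 */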
static inline bool _check_context_is_sparse(struct kgsl_context *context,
			uint64_t flags)
{
	if ((context->flags & KGSL_CONTEXT_SPARSE) ||
		(flags & KGSL_DRAWOBJ_SPARSE))
		return true;

	return false;
}


long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data)
{
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj;
	struct kgsl_drawobj_cmd *cmdobj;
	long result = -EINVAL;

	/* The legacy functions don't support synchronization commands */
	if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
		return -EINVAL;

	/* Sanity check the number of IBs */
	if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
		(param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
		return -EINVAL;

	/* Get the context */
	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
	if (context == NULL)
		return -EINVAL;

	if (_check_context_is_sparse(context, param->flags)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
			CMDOBJ_TYPE);
	if (IS_ERR(cmdobj)) {
		kgsl_context_put(context);
		return PTR_ERR(cmdobj);
	}

	drawobj = DRAWOBJ(cmdobj);

	if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
		result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
			(void __user *) param->ibdesc_addr,
			param->numibs);
	else {
		struct kgsl_ibdesc ibdesc;
		/* Ultra legacy path */

		ibdesc.gpuaddr = param->ibdesc_addr;
		ibdesc.sizedwords = param->numibs;
		ibdesc.ctrl = 0;

		result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
	}

	if (result == 0)
		result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
				&drawobj, 1, &param->timestamp);

	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		kgsl_drawobj_destroy(drawobj);

	kgsl_context_put(context);
	return result;
}

/* Returns 0 on failure. Returns command type(s) on success */
static unsigned int _process_command_input(struct kgsl_device *device,
		unsigned int flags, unsigned int numcmds,
		unsigned int numobjs, unsigned int numsyncs)
{
	if (numcmds > KGSL_MAX_NUMIBS ||
			numobjs > KGSL_MAX_NUMIBS ||
			numsyncs > KGSL_MAX_SYNCPOINTS)
		return 0;

	/*
	 * The SYNC bit is supposed to identify a dummy sync object
	 * so warn the user if they specified any IBs with it.
	 * A MARKER command can either have IBs or not but if the
	 * command has 0 IBs it is automatically assumed to be a marker.
	 */

	/* If they specify the flag, go with what they say */
	if (flags & KGSL_DRAWOBJ_MARKER)
		return MARKEROBJ_TYPE;
	else if (flags & KGSL_DRAWOBJ_SYNC)
		return SYNCOBJ_TYPE;

	/* If not, deduce what they meant */
	if (numsyncs && numcmds)
		return SYNCOBJ_TYPE | CMDOBJ_TYPE;
	else if (numsyncs)
		return SYNCOBJ_TYPE;
	else if (numcmds)
		return CMDOBJ_TYPE;
	else if (numcmds == 0)
		return MARKEROBJ_TYPE;

	return 0;
}

long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_submit_commands *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj[2];
	unsigned int type;
	long result;
	unsigned int i = 0;

	type = _process_command_input(device, param->flags, param->numcmds, 0,
		param->numsyncs);
	if (!type)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	if (_check_context_is_sparse(context, param->flags)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	if (type & SYNCOBJ_TYPE) {
		struct kgsl_drawobj_sync *syncobj =
				kgsl_drawobj_sync_create(device, context);
		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);

		result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
			param->synclist, param->numsyncs);
		if (result)
			goto done;
	}

	if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
		struct kgsl_drawobj_cmd *cmdobj =
				kgsl_drawobj_cmd_create(device,
					context, param->flags, type);
		if (IS_ERR(cmdobj)) {
			result = PTR_ERR(cmdobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(cmdobj);

		result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
			param->cmdlist, param->numcmds);
		if (result)
			goto done;

		/* If no profiling buffer was specified, clear the flag */
		if (cmdobj->profiling_buf_entry == NULL)
			DRAWOBJ(cmdobj)->flags &=
				~(unsigned long)KGSL_DRAWOBJ_PROFILING;
	}

	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
			i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);


	kgsl_context_put(context);
	return result;
}

long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpu_command *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj[2];
	unsigned int type;
	long result;
	unsigned int i = 0;

	type = _process_command_input(device, param->flags, param->numcmds,
		param->numobjs, param->numsyncs);
	if (!type)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	if (_check_context_is_sparse(context, param->flags)) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	if (type & SYNCOBJ_TYPE) {
		struct kgsl_drawobj_sync *syncobj =
				kgsl_drawobj_sync_create(device, context);

		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);

		result = kgsl_drawobj_sync_add_synclist(device, syncobj,
				to_user_ptr(param->synclist),
				param->syncsize, param->numsyncs);
		if (result)
			goto done;
	}

	if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
		struct kgsl_drawobj_cmd *cmdobj =
				kgsl_drawobj_cmd_create(device,
					context, param->flags, type);

		if (IS_ERR(cmdobj)) {
			result = PTR_ERR(cmdobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(cmdobj);

		result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
				to_user_ptr(param->cmdlist),
				param->cmdsize, param->numcmds);
		if (result)
			goto done;

		result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
				to_user_ptr(param->objlist),
				param->objsize, param->numobjs);
		if (result)
			goto done;

		/* If no profiling buffer was specified, clear the flag */
		if (cmdobj->profiling_buf_entry == NULL)
			DRAWOBJ(cmdobj)->flags &=
				~(unsigned long)KGSL_DRAWOBJ_PROFILING;
	}

	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
			i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);

	kgsl_context_put(context);
	return result;
}

long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	long result = -EINVAL;

	mutex_lock(&device->mutex);
	context = kgsl_context_get_owner(dev_priv, param->context_id);

	if (context) {
		result = kgsl_readtimestamp(device, context,
			param->type, &param->timestamp);

		trace_kgsl_readtimestamp(device, context->id,
			param->type, param->timestamp);
	}

	kgsl_context_put(context);
	mutex_unlock(&device->mutex);
	return result;
}

long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;
	struct kgsl_device *device = dev_priv->device;

	context = device->ftbl->drawctxt_create(dev_priv, &param->flags);
	if (IS_ERR(context)) {
		result = PTR_ERR(context);
		goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);

	/* Commit the pointer to the context in context_idr */
	write_lock(&device->context_lock);
	idr_replace(&device->context_idr, context, context->id);
	param->drawctxt_id = context->id;
	write_unlock(&device->context_lock);

done:
	return result;
}

long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data)
{
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
	if (context == NULL)
		return -EINVAL;

	kgsl_context_detach(context);
	kgsl_context_put(context);

	return 0;
}

static long gpumem_free_entry(struct kgsl_mem_entry *entry)
{
	pid_t ptname = 0;

	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	trace_kgsl_mem_free(entry);

	if (entry->memdesc.pagetable != NULL)
		ptname = entry->memdesc.pagetable->name;

	kgsl_memfree_add(entry->priv->pid, ptname, entry->memdesc.gpuaddr,
		entry->memdesc.size, entry->memdesc.flags);

	kgsl_mem_entry_put(entry);

	return 0;
}

static void gpumem_free_func(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int ret)
{
	struct kgsl_context *context = group->context;
	struct kgsl_mem_entry *entry = priv;
	unsigned int timestamp;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &timestamp);

	/* Free the memory for all event types */
	trace_kgsl_mem_timestamp_free(device, entry, KGSL_CONTEXT_ID(context),
		timestamp, 0);
	kgsl_mem_entry_put(entry);
}

static long gpumem_free_entry_on_timestamp(struct kgsl_device *device,
		struct kgsl_mem_entry *entry,
		struct kgsl_context *context, unsigned int timestamp)
{
	int ret;
	unsigned int temp;

	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &temp);
	trace_kgsl_mem_timestamp_queue(device, entry, context->id, temp,
		timestamp);
	ret = kgsl_add_event(device, &context->events,
		timestamp, gpumem_free_func, entry);

	if (ret)
		kgsl_mem_entry_unset_pend(entry);

	return ret;
}

long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	long ret;

	entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
	if (entry == NULL)
		return -EINVAL;

	ret = gpumem_free_entry(entry);
	kgsl_mem_entry_put(entry);

	return ret;
}

long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpumem_free_id *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	long ret;

	entry = kgsl_sharedmem_find_id(private, param->id);
	if (entry == NULL)
		return -EINVAL;

	ret = gpumem_free_entry(entry);
	kgsl_mem_entry_put(entry);

	return ret;
}

static long gpuobj_free_on_timestamp(struct kgsl_device_private *dev_priv,
		struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
{
	struct kgsl_gpu_event_timestamp event;
	struct kgsl_context *context;
	long ret;

	memset(&event, 0, sizeof(event));

	ret = _copy_from_user(&event, to_user_ptr(param->priv),
		sizeof(event), param->len);
	if (ret)
		return ret;

	if (event.context_id == 0)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, event.context_id);
	if (context == NULL)
		return -EINVAL;

	ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry, context,
		event.timestamp);

	kgsl_context_put(context);
	return ret;
}

static void gpuobj_free_fence_func(void *priv)
{
	struct kgsl_mem_entry *entry = priv;

	INIT_WORK(&entry->work, _deferred_put);
	queue_work(kgsl_driver.mem_workqueue, &entry->work);
}

static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
		struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
{
	struct kgsl_sync_fence_cb *handle;
	struct kgsl_gpu_event_fence event;
	long ret;

	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	memset(&event, 0, sizeof(event));

	ret = _copy_from_user(&event, to_user_ptr(param->priv),
		sizeof(event), param->len);
	if (ret) {
		kgsl_mem_entry_unset_pend(entry);
		return ret;
	}

	if (event.fd < 0) {
		kgsl_mem_entry_unset_pend(entry);
		return -EINVAL;
	}

	handle = kgsl_sync_fence_async_wait(event.fd,
		gpuobj_free_fence_func, entry, NULL, 0);

	/* if handle is NULL the fence has already signaled */
	if (handle == NULL)
		return gpumem_free_entry(entry);

	if (IS_ERR(handle)) {
		kgsl_mem_entry_unset_pend(entry);
		return PTR_ERR(handle);
	}

	return 0;
}

long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpuobj_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	long ret;

	entry = kgsl_sharedmem_find_id(private, param->id);
	if (entry == NULL)
		return -EINVAL;

	/* If no event is specified then free immediately */
	if (!(param->flags & KGSL_GPUOBJ_FREE_ON_EVENT))
		ret = gpumem_free_entry(entry);
	else if (param->type == KGSL_GPU_EVENT_TIMESTAMP)
		ret = gpuobj_free_on_timestamp(dev_priv, entry, param);
	else if (param->type == KGSL_GPU_EVENT_FENCE)
		ret = gpuobj_free_on_fence(dev_priv, entry, param);
	else
		ret = -EINVAL;

	kgsl_mem_entry_put(entry);
	return ret;
}

long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
		struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
	struct kgsl_context *context = NULL;
	struct kgsl_mem_entry *entry;
	long ret = -EINVAL;

	if (param->type != KGSL_TIMESTAMP_RETIRED)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	entry = kgsl_sharedmem_find(dev_priv->process_priv,
		(uint64_t) param->gpuaddr);
	if (entry == NULL) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry,
		context, param->timestamp);

	kgsl_mem_entry_put(entry);
	kgsl_context_put(context);

	return ret;
}

2033static inline int _check_region(unsigned long start, unsigned long size,
2034 uint64_t len)
2035{
2036 uint64_t end = ((uint64_t) start) + size;
2037
2038 return (end > len);
2039}
2040
2041static int check_vma_flags(struct vm_area_struct *vma,
2042 unsigned int flags)
2043{
2044 unsigned long flags_requested = (VM_READ | VM_WRITE);
2045
2046 if (flags & KGSL_MEMFLAGS_GPUREADONLY)
Lynus Vazeb7af682017-04-17 18:36:01 +05302047 flags_requested &= ~(unsigned long)VM_WRITE;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002048
2049 if ((vma->vm_flags & flags_requested) == flags_requested)
2050 return 0;
2051
2052 return -EFAULT;
2053}
2054
2055static int check_vma(struct vm_area_struct *vma, struct file *vmfile,
2056 struct kgsl_memdesc *memdesc)
2057{
2058 if (vma == NULL || vma->vm_file != vmfile)
2059 return -EINVAL;
2060
2061 /* userspace may not know the size, in which case use the whole vma */
2062 if (memdesc->size == 0)
2063 memdesc->size = vma->vm_end - vma->vm_start;
2064 /* range checking */
2065 if (vma->vm_start != memdesc->useraddr ||
2066 (memdesc->useraddr + memdesc->size) != vma->vm_end)
2067 return -EINVAL;
2068 return check_vma_flags(vma, memdesc->flags);
2069}
2070
2071static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
2072{
2073 int ret = 0;
2074 long npages = 0, i;
2075 size_t sglen = (size_t) (memdesc->size / PAGE_SIZE);
2076 struct page **pages = NULL;
2077 int write = ((memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 0 :
2078 FOLL_WRITE);
2079
2080 if (sglen == 0 || sglen >= LONG_MAX)
2081 return -EINVAL;
2082
2083 pages = kgsl_malloc(sglen * sizeof(struct page *));
2084 if (pages == NULL)
2085 return -ENOMEM;
2086
2087 memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
2088 if (memdesc->sgt == NULL) {
2089 ret = -ENOMEM;
2090 goto out;
2091 }
2092
2093 down_read(&current->mm->mmap_sem);
2094 /* If we have vmfile, make sure we map the correct vma and map it all */
2095 if (vmfile != NULL)
2096 ret = check_vma(find_vma(current->mm, memdesc->useraddr),
2097 vmfile, memdesc);
2098
2099 if (ret == 0) {
2100 npages = get_user_pages(memdesc->useraddr,
2101 sglen, write, pages, NULL);
2102 ret = (npages < 0) ? (int)npages : 0;
2103 }
2104 up_read(&current->mm->mmap_sem);
2105
2106 if (ret)
2107 goto out;
2108
2109 if ((unsigned long) npages != sglen) {
2110 ret = -EINVAL;
2111 goto out;
2112 }
2113
2114 ret = sg_alloc_table_from_pages(memdesc->sgt, pages, npages,
2115 0, memdesc->size, GFP_KERNEL);
2116out:
2117 if (ret) {
2118 for (i = 0; i < npages; i++)
2119 put_page(pages[i]);
2120
2121 kfree(memdesc->sgt);
2122 memdesc->sgt = NULL;
2123 }
2124 kgsl_free(pages);
2125 return ret;
2126}
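/*
 * Worked example for memdesc_sg_virt() above, assuming 4 KB pages
 * (illustrative numbers): importing a 1 MB anonymous buffer gives
 * sglen = 256, get_user_pages() must pin all 256 pages, and
 * sg_alloc_table_from_pages() folds physically contiguous runs into a
 * compact sg_table; on any failure every page already pinned is
 * released with put_page() before returning.
 */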
2127
2128static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
2129 struct kgsl_mem_entry *entry, unsigned long hostptr,
2130 size_t offset, size_t size)
2131{
2132 /* Map an anonymous memory chunk */
2133
2134 if (size == 0 || offset != 0 ||
2135 !IS_ALIGNED(size, PAGE_SIZE))
2136 return -EINVAL;
2137
2138 entry->memdesc.pagetable = pagetable;
2139 entry->memdesc.size = (uint64_t) size;
2140 entry->memdesc.useraddr = hostptr;
Lynus Vazeb7af682017-04-17 18:36:01 +05302141 entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002142
2143 if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
2144 int ret;
2145
2146 /* Register the address in the database */
2147 ret = kgsl_mmu_set_svm_region(pagetable,
2148 (uint64_t) entry->memdesc.useraddr, (uint64_t) size);
2149
2150 if (ret)
2151 return ret;
2152
2153 entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr;
2154 }
2155
2156 return memdesc_sg_virt(&entry->memdesc, NULL);
2157}
2158
2159#ifdef CONFIG_DMA_SHARED_BUFFER
2160static int match_file(const void *p, struct file *file, unsigned int fd)
2161{
2162 /*
2163 * We must return fd + 1 because iterate_fd stops searching on
2164 * non-zero return, but 0 is a valid fd.
2165 */
2166 return (p == file) ? (fd + 1) : 0;
2167}
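/*
 * Example of the fd + 1 convention above: if the vma's file happens to
 * be installed at fd 0, match_file() returns 1, so iterate_fd() stops
 * and reports a match; the caller below undoes the offset with
 * dma_buf_get(fd - 1). A return of 0 from iterate_fd() therefore always
 * means "no matching fd was found".
 */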
2168
2169static void _setup_cache_mode(struct kgsl_mem_entry *entry,
2170 struct vm_area_struct *vma)
2171{
Lynus Vazeb7af682017-04-17 18:36:01 +05302172 uint64_t mode;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002173 pgprot_t pgprot = vma->vm_page_prot;
2174
2175 if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot)))
2176 mode = KGSL_CACHEMODE_UNCACHED;
2177 else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(pgprot)))
2178 mode = KGSL_CACHEMODE_WRITECOMBINE;
2179 else
2180 mode = KGSL_CACHEMODE_WRITEBACK;
2181
2182 entry->memdesc.flags |= (mode << KGSL_CACHEMODE_SHIFT);
2183}
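/*
 * Illustrative note on the helper above: the cache mode of an imported
 * mapping is inferred purely from the vma's page protection. For
 * example, a buffer its owner mmap()ed uncached compares equal to the
 * pgprot_noncached() encoding and is tagged KGSL_CACHEMODE_UNCACHED;
 * anything that matches neither the uncached nor the writecombine
 * encoding is assumed to be writeback.
 */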
2184
2185static int kgsl_setup_dma_buf(struct kgsl_device *device,
2186 struct kgsl_pagetable *pagetable,
2187 struct kgsl_mem_entry *entry,
2188 struct dma_buf *dmabuf);
2189
2190static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
2191 struct kgsl_pagetable *pagetable,
2192 struct kgsl_mem_entry *entry, unsigned long hostptr)
2193{
2194 struct vm_area_struct *vma;
2195 struct dma_buf *dmabuf = NULL;
2196 int ret;
2197
2198 /*
2199 * Find the VMA containing this pointer and figure out if it
2200 * is a dma-buf.
2201 */
2202 down_read(&current->mm->mmap_sem);
2203 vma = find_vma(current->mm, hostptr);
2204
2205 if (vma && vma->vm_file) {
2206 int fd;
2207
2208 ret = check_vma_flags(vma, entry->memdesc.flags);
2209 if (ret) {
2210 up_read(&current->mm->mmap_sem);
2211 return ret;
2212 }
2213
2214 /*
2215 * Check to see that this isn't our own memory that we have
2216 * already mapped
2217 */
2218 if (vma->vm_file->f_op == &kgsl_fops) {
2219 up_read(&current->mm->mmap_sem);
2220 return -EFAULT;
2221 }
2222
2223 		/* Look for the fd that matches the vma file */
2224 fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
2225 if (fd != 0)
2226 dmabuf = dma_buf_get(fd - 1);
2227 }
2228 up_read(&current->mm->mmap_sem);
2229
2230 if (IS_ERR_OR_NULL(dmabuf))
2231 return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
2232
2233 ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
2234 if (ret) {
2235 dma_buf_put(dmabuf);
2236 return ret;
2237 }
2238
2239 /* Setup the user addr/cache mode for cache operations */
2240 entry->memdesc.useraddr = hostptr;
2241 _setup_cache_mode(entry, vma);
2242
2243 return 0;
2244}
2245#else
2246static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
2247 struct kgsl_pagetable *pagetable,
2248 struct kgsl_mem_entry *entry, unsigned long hostptr)
2249{
2250 return -ENODEV;
2251}
2252#endif
2253
2254static int kgsl_setup_useraddr(struct kgsl_device *device,
2255 struct kgsl_pagetable *pagetable,
2256 struct kgsl_mem_entry *entry,
2257 unsigned long hostptr, size_t offset, size_t size)
2258{
2259 int ret;
2260
2261 if (hostptr == 0 || !IS_ALIGNED(hostptr, PAGE_SIZE))
2262 return -EINVAL;
2263
2264 /* Try to set up a dmabuf - if it returns -ENODEV assume anonymous */
2265 ret = kgsl_setup_dmabuf_useraddr(device, pagetable, entry, hostptr);
2266 if (ret != -ENODEV)
2267 return ret;
2268
2269 /* Okay - lets go legacy */
2270 return kgsl_setup_anon_useraddr(pagetable, entry,
2271 hostptr, offset, size);
2272}
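/*
 * Illustrative behaviour of the fallback above: a pointer into an
 * mmap()ed dma-buf is recognised through its vma->vm_file and imported
 * by reference via kgsl_setup_dmabuf_useraddr(), while a plain
 * anonymous malloc()/mmap() pointer has no dma-buf behind it, so that
 * path returns -ENODEV and the buffer is pinned page by page through
 * kgsl_setup_anon_useraddr() instead.
 */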
2273
2274static long _gpuobj_map_useraddr(struct kgsl_device *device,
2275 struct kgsl_pagetable *pagetable,
2276 struct kgsl_mem_entry *entry,
2277 struct kgsl_gpuobj_import *param)
2278{
2279 struct kgsl_gpuobj_import_useraddr useraddr;
2280 int ret;
2281
2282 	/* Specifying SECURE is an explicit error */
2283 	if (param->flags & KGSL_MEMFLAGS_SECURE)
2284 		return -ENOTSUPP;
2285
2286 	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
2287 		| KGSL_CACHEMODE_MASK
2288 		| KGSL_MEMTYPE_MASK
2289 		| KGSL_MEMFLAGS_FORCE_32BIT;
2290
2291 ret = _copy_from_user(&useraddr,
2292 to_user_ptr(param->priv), sizeof(useraddr),
2293 param->priv_len);
2294 if (ret)
2295 return ret;
2296
2297 	/* Verify that the virtaddr is within bounds */
2298 if (useraddr.virtaddr > ULONG_MAX)
2299 return -EINVAL;
2300
2301 return kgsl_setup_useraddr(device, pagetable, entry,
2302 (unsigned long) useraddr.virtaddr, 0, param->priv_len);
2303}
2304
2305#ifdef CONFIG_DMA_SHARED_BUFFER
2306static long _gpuobj_map_dma_buf(struct kgsl_device *device,
2307 struct kgsl_pagetable *pagetable,
2308 struct kgsl_mem_entry *entry,
2309 struct kgsl_gpuobj_import *param,
2310 int *fd)
2311{
2312 struct kgsl_gpuobj_import_dma_buf buf;
2313 struct dma_buf *dmabuf;
2314 int ret;
2315
2316 /*
2317 	 * If content protection is not enabled and a secure buffer
2318 	 * is requested to be mapped, return an error.
2319 */
2320 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
2321 if (!kgsl_mmu_is_secured(&device->mmu)) {
2322 dev_WARN_ONCE(device->dev, 1,
2323 "Secure buffer not supported");
2324 return -ENOTSUPP;
2325 }
2326
2327 entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
2328 }
2329
2330 ret = _copy_from_user(&buf, to_user_ptr(param->priv),
2331 sizeof(buf), param->priv_len);
2332 if (ret)
2333 return ret;
2334
2335 if (buf.fd < 0)
2336 return -EINVAL;
2337
2338 *fd = buf.fd;
2339 dmabuf = dma_buf_get(buf.fd);
2340
2341 if (IS_ERR_OR_NULL(dmabuf))
2342 return (dmabuf == NULL) ? -EINVAL : PTR_ERR(dmabuf);
2343
2344 ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
2345 if (ret)
2346 dma_buf_put(dmabuf);
2347
2348 return ret;
2349}
2350#else
2351static long _gpuobj_map_dma_buf(struct kgsl_device *device,
2352 struct kgsl_pagetable *pagetable,
2353 struct kgsl_mem_entry *entry,
2354 struct kgsl_gpuobj_import *param,
2355 int *fd)
2356{
2357 return -EINVAL;
2358}
2359#endif
2360
2361long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
2362 unsigned int cmd, void *data)
2363{
2364 struct kgsl_process_private *private = dev_priv->process_priv;
2365 struct kgsl_gpuobj_import *param = data;
2366 struct kgsl_mem_entry *entry;
2367 int ret, fd = -1;
2368 struct kgsl_mmu *mmu = &dev_priv->device->mmu;
2369
2370 entry = kgsl_mem_entry_create();
2371 if (entry == NULL)
2372 return -ENOMEM;
2373
2374 param->flags &= KGSL_MEMFLAGS_GPUREADONLY
2375 | KGSL_MEMTYPE_MASK
2376 | KGSL_MEMALIGN_MASK
2377 | KGSL_MEMFLAGS_USE_CPU_MAP
2378 | KGSL_MEMFLAGS_SECURE
2379 | KGSL_MEMFLAGS_FORCE_32BIT;
2380
2381 entry->memdesc.flags = param->flags;
2382
2383 if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
2384 entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
2385
2386 if (param->type == KGSL_USER_MEM_TYPE_ADDR)
2387 ret = _gpuobj_map_useraddr(dev_priv->device, private->pagetable,
2388 entry, param);
2389 else if (param->type == KGSL_USER_MEM_TYPE_DMABUF)
2390 ret = _gpuobj_map_dma_buf(dev_priv->device, private->pagetable,
2391 entry, param, &fd);
2392 else
2393 ret = -ENOTSUPP;
2394
2395 if (ret)
2396 goto out;
2397
2398 if (entry->memdesc.size >= SZ_1M)
2399 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
2400 else if (entry->memdesc.size >= SZ_64K)
2401 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
2402
2403 param->flags = entry->memdesc.flags;
2404
2405 ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
2406 if (ret)
2407 goto unmap;
2408
2409 param->id = entry->id;
2410
2411 KGSL_STATS_ADD(entry->memdesc.size, &kgsl_driver.stats.mapped,
2412 &kgsl_driver.stats.mapped_max);
2413
2414 kgsl_process_add_stats(private,
2415 kgsl_memdesc_usermem_type(&entry->memdesc),
2416 entry->memdesc.size);
2417
2418 trace_kgsl_mem_map(entry, fd);
2419
2420 kgsl_mem_entry_commit_process(entry);
Tarun Karra24d3fe12017-04-05 15:23:03 -07002421
2422 /* Put the extra ref from kgsl_mem_entry_create() */
2423 kgsl_mem_entry_put(entry);
2424
Shrenuj Bansala419c792016-10-20 14:05:11 -07002425 return 0;
2426
2427unmap:
2428 if (param->type == KGSL_USER_MEM_TYPE_DMABUF) {
2429 kgsl_destroy_ion(entry->priv_data);
2430 entry->memdesc.sgt = NULL;
2431 }
2432
2433 kgsl_sharedmem_free(&entry->memdesc);
2434
2435out:
2436 kfree(entry);
2437 return ret;
2438}
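/*
 * Minimal userspace sketch of a dma-buf import through the ioctl above
 * (illustrative only; the command name and struct layouts are assumed
 * from the msm_kgsl UAPI header, error handling omitted):
 *
 *	struct kgsl_gpuobj_import_dma_buf buf = { .fd = dmabuf_fd };
 *	struct kgsl_gpuobj_import param = {
 *		.priv = (uintptr_t) &buf,
 *		.priv_len = sizeof(buf),
 *		.flags = 0,
 *		.type = KGSL_USER_MEM_TYPE_DMABUF,
 *	};
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_GPUOBJ_IMPORT, &param);
 *
 * On success param.id identifies the new mem entry and param.flags
 * echoes back the flags that were actually applied.
 */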
2439
2440static long _map_usermem_addr(struct kgsl_device *device,
2441 struct kgsl_pagetable *pagetable, struct kgsl_mem_entry *entry,
2442 unsigned long hostptr, size_t offset, size_t size)
2443{
2444 if (!MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED))
2445 return -EINVAL;
2446
2447 /* No CPU mapped buffer could ever be secure */
2448 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
2449 return -EINVAL;
2450
2451 return kgsl_setup_useraddr(device, pagetable, entry, hostptr,
2452 offset, size);
2453}
2454
2455#ifdef CONFIG_DMA_SHARED_BUFFER
2456static int _map_usermem_dma_buf(struct kgsl_device *device,
2457 struct kgsl_pagetable *pagetable,
2458 struct kgsl_mem_entry *entry,
2459 unsigned int fd)
2460{
2461 int ret;
2462 struct dma_buf *dmabuf;
2463
2464 /*
2465 	 * If content protection is not enabled and a secure buffer
2466 	 * is requested to be mapped, return an error.
2467 */
2468
2469 if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
2470 if (!kgsl_mmu_is_secured(&device->mmu)) {
2471 dev_WARN_ONCE(device->dev, 1,
2472 "Secure buffer not supported");
2473 return -EINVAL;
2474 }
2475
2476 entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
2477 }
2478
2479 dmabuf = dma_buf_get(fd);
2480 if (IS_ERR_OR_NULL(dmabuf)) {
2481 ret = PTR_ERR(dmabuf);
2482 return ret ? ret : -EINVAL;
2483 }
2484 ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
2485 if (ret)
2486 dma_buf_put(dmabuf);
2487 return ret;
2488}
2489#else
2490static int _map_usermem_dma_buf(struct kgsl_device *device,
2491 struct kgsl_pagetable *pagetable,
2492 struct kgsl_mem_entry *entry,
2493 unsigned int fd)
2494{
2495 return -EINVAL;
2496}
2497#endif
2498
2499#ifdef CONFIG_DMA_SHARED_BUFFER
2500static int kgsl_setup_dma_buf(struct kgsl_device *device,
2501 struct kgsl_pagetable *pagetable,
2502 struct kgsl_mem_entry *entry,
2503 struct dma_buf *dmabuf)
2504{
2505 int ret = 0;
2506 struct scatterlist *s;
2507 struct sg_table *sg_table;
2508 struct dma_buf_attachment *attach = NULL;
2509 struct kgsl_dma_buf_meta *meta;
2510
2511 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
2512 if (!meta)
2513 return -ENOMEM;
2514
2515 attach = dma_buf_attach(dmabuf, device->dev);
2516 if (IS_ERR_OR_NULL(attach)) {
2517 ret = attach ? PTR_ERR(attach) : -EINVAL;
2518 goto out;
2519 }
2520
2521 meta->dmabuf = dmabuf;
2522 meta->attach = attach;
2523
2524 attach->priv = entry;
2525
2526 entry->priv_data = meta;
2527 entry->memdesc.pagetable = pagetable;
2528 entry->memdesc.size = 0;
2529 	/* USE_CPU_MAP is not implemented for ION. */
2530 entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
Lynus Vazeb7af682017-04-17 18:36:01 +05302531 entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ION;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002532
2533 sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
2534
2535 if (IS_ERR_OR_NULL(sg_table)) {
2536 ret = PTR_ERR(sg_table);
2537 goto out;
2538 }
2539
2540 meta->table = sg_table;
2541 entry->priv_data = meta;
2542 entry->memdesc.sgt = sg_table;
2543
2544 /* Calculate the size of the memdesc from the sglist */
2545 for (s = entry->memdesc.sgt->sgl; s != NULL; s = sg_next(s)) {
2546 int priv = (entry->memdesc.priv & KGSL_MEMDESC_SECURE) ? 1 : 0;
2547
2548 /*
2549 		 * Check that each chunk of the sg table matches the secure
2550 * flag.
2551 */
2552
2553 if (PagePrivate(sg_page(s)) != priv) {
2554 ret = -EPERM;
2555 goto out;
2556 }
2557
2558 entry->memdesc.size += (uint64_t) s->length;
2559 }
2560
2561 entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size);
2562
2563out:
2564 if (ret) {
2565 if (!IS_ERR_OR_NULL(attach))
2566 dma_buf_detach(dmabuf, attach);
2567
2568
2569 kfree(meta);
2570 }
2571
2572 return ret;
2573}
2574#endif
2575
2576#ifdef CONFIG_DMA_SHARED_BUFFER
2577void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
2578 int *egl_surface_count, int *egl_image_count)
2579{
2580 struct kgsl_dma_buf_meta *meta = entry->priv_data;
2581 struct dma_buf *dmabuf = meta->dmabuf;
2582 struct dma_buf_attachment *mem_entry_buf_attachment = meta->attach;
2583 struct device *buf_attachment_dev = mem_entry_buf_attachment->dev;
2584 struct dma_buf_attachment *attachment = NULL;
2585
2586 mutex_lock(&dmabuf->lock);
2587 list_for_each_entry(attachment, &dmabuf->attachments, node) {
2588 struct kgsl_mem_entry *scan_mem_entry = NULL;
2589
2590 if (attachment->dev != buf_attachment_dev)
2591 continue;
2592
2593 scan_mem_entry = attachment->priv;
2594 if (!scan_mem_entry)
2595 continue;
2596
2597 switch (kgsl_memdesc_get_memtype(&scan_mem_entry->memdesc)) {
2598 case KGSL_MEMTYPE_EGL_SURFACE:
2599 (*egl_surface_count)++;
2600 break;
2601 case KGSL_MEMTYPE_EGL_IMAGE:
2602 (*egl_image_count)++;
2603 break;
2604 }
2605 }
2606 mutex_unlock(&dmabuf->lock);
2607}
2608#else
2609void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
2610 int *egl_surface_count, int *egl_image_count)
2611{
2612}
2613#endif
2614
2615long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
2616 unsigned int cmd, void *data)
2617{
2618 int result = -EINVAL;
2619 struct kgsl_map_user_mem *param = data;
2620 struct kgsl_mem_entry *entry = NULL;
2621 struct kgsl_process_private *private = dev_priv->process_priv;
2622 struct kgsl_mmu *mmu = &dev_priv->device->mmu;
2623 unsigned int memtype;
2624
2625 /*
2626 	 * If content protection is not enabled and a secure buffer
2627 	 * is requested to be mapped, return an error.
2628 */
2629
2630 if (param->flags & KGSL_MEMFLAGS_SECURE) {
2631 		/* Log a message and return if content protection isn't enabled */
2632 if (!kgsl_mmu_is_secured(mmu)) {
2633 dev_WARN_ONCE(dev_priv->device->dev, 1,
2634 "Secure buffer not supported");
2635 return -EOPNOTSUPP;
2636 }
2637
2638 /* Can't use CPU map with secure buffers */
2639 if (param->flags & KGSL_MEMFLAGS_USE_CPU_MAP)
2640 return -EINVAL;
2641 }
2642
2643 entry = kgsl_mem_entry_create();
2644
2645 if (entry == NULL)
2646 return -ENOMEM;
2647
2648 /*
2649 * Convert from enum value to KGSL_MEM_ENTRY value, so that
2650 * we can use the latter consistently everywhere.
2651 */
2652 memtype = param->memtype + 1;
2653
2654 /*
2655 * Mask off unknown flags from userspace. This way the caller can
2656 * check if a flag is supported by looking at the returned flags.
2657 * Note: CACHEMODE is ignored for this call. Caching should be
2658 * determined by type of allocation being mapped.
2659 */
2660 param->flags &= KGSL_MEMFLAGS_GPUREADONLY
2661 | KGSL_MEMTYPE_MASK
2662 | KGSL_MEMALIGN_MASK
2663 | KGSL_MEMFLAGS_USE_CPU_MAP
2664 | KGSL_MEMFLAGS_SECURE;
2665 entry->memdesc.flags = ((uint64_t) param->flags)
2666 | KGSL_MEMFLAGS_FORCE_32BIT;
2667
2668 if (!kgsl_mmu_use_cpu_map(mmu))
2669 entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
2670
2671 if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
2672 entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
2673
2674 if (param->flags & KGSL_MEMFLAGS_SECURE)
2675 entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
2676
2677 switch (memtype) {
2678 case KGSL_MEM_ENTRY_USER:
2679 result = _map_usermem_addr(dev_priv->device, private->pagetable,
2680 entry, param->hostptr, param->offset, param->len);
2681 break;
2682 case KGSL_MEM_ENTRY_ION:
2683 if (param->offset != 0)
2684 result = -EINVAL;
2685 else
2686 result = _map_usermem_dma_buf(dev_priv->device,
2687 private->pagetable, entry, param->fd);
2688 break;
2689 default:
2690 result = -EOPNOTSUPP;
2691 break;
2692 }
2693
2694 if (result)
2695 goto error;
2696
2697 if ((param->flags & KGSL_MEMFLAGS_SECURE) &&
2698 (entry->memdesc.size & mmu->secure_align_mask)) {
2699 result = -EINVAL;
2700 goto error_attach;
2701 }
2702
2703 if (entry->memdesc.size >= SZ_2M)
2704 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_2M));
2705 else if (entry->memdesc.size >= SZ_1M)
2706 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
2707 else if (entry->memdesc.size >= SZ_64K)
2708 		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
2709
2710 /* echo back flags */
2711 param->flags = (unsigned int) entry->memdesc.flags;
2712
2713 result = kgsl_mem_entry_attach_process(dev_priv->device, private,
2714 entry);
2715 if (result)
2716 goto error_attach;
2717
2718 /* Adjust the returned value for a non 4k aligned offset */
2719 param->gpuaddr = (unsigned long)
2720 entry->memdesc.gpuaddr + (param->offset & PAGE_MASK);
2721
2722 KGSL_STATS_ADD(param->len, &kgsl_driver.stats.mapped,
2723 &kgsl_driver.stats.mapped_max);
2724
2725 kgsl_process_add_stats(private,
2726 kgsl_memdesc_usermem_type(&entry->memdesc), param->len);
2727
2728 trace_kgsl_mem_map(entry, param->fd);
2729
2730 kgsl_mem_entry_commit_process(entry);
Tarun Karra24d3fe12017-04-05 15:23:03 -07002731
2732 /* Put the extra ref from kgsl_mem_entry_create() */
2733 kgsl_mem_entry_put(entry);
2734
Shrenuj Bansala419c792016-10-20 14:05:11 -07002735 return result;
2736
2737error_attach:
2738 switch (memtype) {
2739 case KGSL_MEM_ENTRY_ION:
2740 kgsl_destroy_ion(entry->priv_data);
2741 entry->memdesc.sgt = NULL;
2742 break;
2743 default:
2744 break;
2745 }
2746 kgsl_sharedmem_free(&entry->memdesc);
2747error:
2748 /* Clear gpuaddr here so userspace doesn't get any wrong ideas */
2749 param->gpuaddr = 0;
2750
2751 kfree(entry);
2752 return result;
2753}
2754
2755static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
2756 uint64_t offset, uint64_t length, unsigned int op)
2757{
2758 int ret = 0;
2759 int cacheop;
2760 int mode;
2761
2762 /*
2763 * Flush is defined as (clean | invalidate). If both bits are set, then
2764 * do a flush, otherwise check for the individual bits and clean or inv
2765 * as requested
2766 */
2767
2768 if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
2769 cacheop = KGSL_CACHE_OP_FLUSH;
2770 else if (op & KGSL_GPUMEM_CACHE_CLEAN)
2771 cacheop = KGSL_CACHE_OP_CLEAN;
2772 else if (op & KGSL_GPUMEM_CACHE_INV)
2773 cacheop = KGSL_CACHE_OP_INV;
2774 else {
2775 ret = -EINVAL;
2776 goto done;
2777 }
2778
2779 if (!(op & KGSL_GPUMEM_CACHE_RANGE)) {
2780 offset = 0;
2781 length = entry->memdesc.size;
2782 }
2783
2784 mode = kgsl_memdesc_get_cachemode(&entry->memdesc);
2785 if (mode != KGSL_CACHEMODE_UNCACHED
2786 && mode != KGSL_CACHEMODE_WRITECOMBINE) {
2787 trace_kgsl_mem_sync_cache(entry, offset, length, op);
2788 ret = kgsl_cache_range_op(&entry->memdesc, offset,
2789 length, cacheop);
2790 }
2791
2792done:
2793 return ret;
2794}
2795
2796/* New cache sync function - supports both directions (clean and invalidate) */
2797
2798long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
2799 unsigned int cmd, void *data)
2800{
2801 struct kgsl_gpumem_sync_cache *param = data;
2802 struct kgsl_process_private *private = dev_priv->process_priv;
2803 struct kgsl_mem_entry *entry = NULL;
2804 long ret;
2805
2806 if (param->id != 0)
2807 entry = kgsl_sharedmem_find_id(private, param->id);
2808 else if (param->gpuaddr != 0)
2809 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
2810
2811 if (entry == NULL)
2812 return -EINVAL;
2813
2814 ret = _kgsl_gpumem_sync_cache(entry, (uint64_t) param->offset,
2815 (uint64_t) param->length, param->op);
2816 kgsl_mem_entry_put(entry);
2817 return ret;
2818}
2819
2820static int mem_id_cmp(const void *_a, const void *_b)
2821{
2822 const unsigned int *a = _a, *b = _b;
2823
2824 if (*a == *b)
2825 return 0;
2826 return (*a > *b) ? 1 : -1;
2827}
2828
2829#ifdef CONFIG_ARM64
2830/* Do not support full flush on ARM64 targets */
2831static inline bool check_full_flush(size_t size, int op)
2832{
2833 return false;
2834}
2835#else
2836/* Support full flush if the size is bigger than the threshold */
2837static inline bool check_full_flush(size_t size, int op)
2838{
2839 /* If we exceed the breakeven point, flush the entire cache */
2840 bool ret = (kgsl_driver.full_cache_threshold != 0) &&
2841 (size >= kgsl_driver.full_cache_threshold) &&
2842 (op == KGSL_GPUMEM_CACHE_FLUSH);
2843 	if (ret)
2844 		flush_cache_all();
2847 return ret;
2848}
2849#endif
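/*
 * Breakeven example for check_full_flush() (illustrative threshold): if
 * kgsl_driver.full_cache_threshold were 16 MB, a bulk
 * KGSL_GPUMEM_CACHE_FLUSH covering 20 MB of buffers would take the
 * single flush_cache_all() path instead of 20 MB of per-buffer cache
 * maintenance, while a clean-only or invalidate-only operation never
 * takes it. On ARM64 the full-cache path is never used.
 */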
2850
2851long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
2852 unsigned int cmd, void *data)
2853{
2854 int i;
2855 struct kgsl_gpumem_sync_cache_bulk *param = data;
2856 struct kgsl_process_private *private = dev_priv->process_priv;
2857 unsigned int id, last_id = 0, *id_list = NULL, actual_count = 0;
2858 struct kgsl_mem_entry **entries = NULL;
2859 long ret = 0;
2860 uint64_t op_size = 0;
2861 bool full_flush = false;
2862
2863 if (param->id_list == NULL || param->count == 0
2864 || param->count > (PAGE_SIZE / sizeof(unsigned int)))
2865 return -EINVAL;
2866
2867 id_list = kcalloc(param->count, sizeof(unsigned int), GFP_KERNEL);
2868 if (id_list == NULL)
2869 return -ENOMEM;
2870
2871 entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
2872 if (entries == NULL) {
2873 ret = -ENOMEM;
2874 goto end;
2875 }
2876
2877 if (copy_from_user(id_list, param->id_list,
2878 param->count * sizeof(unsigned int))) {
2879 ret = -EFAULT;
2880 goto end;
2881 }
2882 /* sort the ids so we can weed out duplicates */
2883 sort(id_list, param->count, sizeof(*id_list), mem_id_cmp, NULL);
2884
2885 for (i = 0; i < param->count; i++) {
2886 unsigned int cachemode;
2887 struct kgsl_mem_entry *entry = NULL;
2888
2889 id = id_list[i];
2890 /* skip 0 ids or duplicates */
2891 if (id == last_id)
2892 continue;
2893
2894 entry = kgsl_sharedmem_find_id(private, id);
2895 if (entry == NULL)
2896 continue;
2897
2898 /* skip uncached memory */
2899 cachemode = kgsl_memdesc_get_cachemode(&entry->memdesc);
2900 if (cachemode != KGSL_CACHEMODE_WRITETHROUGH &&
2901 cachemode != KGSL_CACHEMODE_WRITEBACK) {
2902 kgsl_mem_entry_put(entry);
2903 continue;
2904 }
2905
2906 op_size += entry->memdesc.size;
2907 entries[actual_count++] = entry;
2908
2909 full_flush = check_full_flush(op_size, param->op);
2910 if (full_flush)
2911 break;
2912
2913 last_id = id;
2914 }
2915
2916 param->op &= ~KGSL_GPUMEM_CACHE_RANGE;
2917
2918 for (i = 0; i < actual_count; i++) {
2919 if (!full_flush)
2920 _kgsl_gpumem_sync_cache(entries[i], 0,
2921 entries[i]->memdesc.size,
2922 param->op);
2923 kgsl_mem_entry_put(entries[i]);
2924 }
2925end:
2926 kfree(entries);
2927 kfree(id_list);
2928 return ret;
2929}
2930
2931/* Legacy cache function, does a flush (clean + invalidate) */
2932
2933long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
2934 unsigned int cmd, void *data)
2935{
2936 struct kgsl_sharedmem_free *param = data;
2937 struct kgsl_process_private *private = dev_priv->process_priv;
2938 struct kgsl_mem_entry *entry = NULL;
2939 long ret;
2940
2941 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
2942 if (entry == NULL)
2943 return -EINVAL;
2944
2945 ret = _kgsl_gpumem_sync_cache(entry, 0, entry->memdesc.size,
2946 KGSL_GPUMEM_CACHE_FLUSH);
2947 kgsl_mem_entry_put(entry);
2948 return ret;
2949}
2950
2951long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
2952 unsigned int cmd, void *data)
2953{
2954 struct kgsl_process_private *private = dev_priv->process_priv;
2955 struct kgsl_gpuobj_sync *param = data;
2956 struct kgsl_gpuobj_sync_obj *objs;
2957 struct kgsl_mem_entry **entries;
2958 long ret = 0;
2959 bool full_flush = false;
2960 uint64_t size = 0;
2961 int i, count = 0;
2962 void __user *ptr;
2963
2964 if (param->count == 0 || param->count > 128)
2965 return -EINVAL;
2966
2967 objs = kcalloc(param->count, sizeof(*objs), GFP_KERNEL);
2968 if (objs == NULL)
2969 return -ENOMEM;
2970
2971 entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
2972 if (entries == NULL) {
2973 ret = -ENOMEM;
2974 goto out;
2975 }
2976
2977 ptr = to_user_ptr(param->objs);
2978
2979 for (i = 0; i < param->count; i++) {
2980 ret = _copy_from_user(&objs[i], ptr, sizeof(*objs),
2981 param->obj_len);
2982 if (ret)
2983 goto out;
2984
2985 entries[i] = kgsl_sharedmem_find_id(private, objs[i].id);
2986
2987 /* Not finding the ID is not a fatal failure - just skip it */
2988 if (entries[i] == NULL)
2989 continue;
2990
2991 count++;
2992
2993 if (!(objs[i].op & KGSL_GPUMEM_CACHE_RANGE))
2994 size += entries[i]->memdesc.size;
2995 else if (objs[i].offset < entries[i]->memdesc.size)
2996 size += (entries[i]->memdesc.size - objs[i].offset);
2997
2998 full_flush = check_full_flush(size, objs[i].op);
2999 if (full_flush)
3000 break;
3001
3002 ptr += sizeof(*objs);
3003 }
3004
3005 if (!full_flush) {
3006 for (i = 0; !ret && i < param->count; i++)
3007 if (entries[i])
3008 ret = _kgsl_gpumem_sync_cache(entries[i],
3009 objs[i].offset, objs[i].length,
3010 objs[i].op);
3011 }
3012
3013 for (i = 0; i < param->count; i++)
3014 if (entries[i])
3015 kgsl_mem_entry_put(entries[i]);
3016
3017out:
3018 kfree(entries);
3019 kfree(objs);
3020
3021 return ret;
3022}
3023
3024#ifdef CONFIG_ARM64
3025static uint64_t kgsl_filter_cachemode(uint64_t flags)
3026{
3027 /*
3028 * WRITETHROUGH is not supported in arm64, so we tell the user that we
3029 * use WRITEBACK which is the default caching policy.
3030 */
3031 if ((flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT ==
3032 KGSL_CACHEMODE_WRITETHROUGH) {
3033 flags &= ~((uint64_t) KGSL_CACHEMODE_MASK);
Lynus Vazeb7af682017-04-17 18:36:01 +05303034 flags |= (uint64_t)((KGSL_CACHEMODE_WRITEBACK <<
3035 KGSL_CACHEMODE_SHIFT) &
3036 KGSL_CACHEMODE_MASK);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003037 }
3038 return flags;
3039}
3040#else
3041static uint64_t kgsl_filter_cachemode(uint64_t flags)
3042{
3043 return flags;
3044}
3045#endif
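/*
 * Example for kgsl_filter_cachemode(): on arm64 a request for
 * KGSL_CACHEMODE_WRITETHROUGH in the allocation flags is rewritten to
 * KGSL_CACHEMODE_WRITEBACK, and because the alloc ioctls copy
 * entry->memdesc.flags back into the returned flags, userspace can see
 * the substitution in what comes back.
 */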
3046
3047/* The largest allowable alignment for a GPU object is 32MB */
3048#define KGSL_MAX_ALIGN (32 * SZ_1M)
3049
3050static struct kgsl_mem_entry *gpumem_alloc_entry(
3051 struct kgsl_device_private *dev_priv,
3052 uint64_t size, uint64_t flags)
3053{
3054 int ret;
3055 struct kgsl_process_private *private = dev_priv->process_priv;
3056 struct kgsl_mem_entry *entry;
3057 unsigned int align;
3058
3059 flags &= KGSL_MEMFLAGS_GPUREADONLY
3060 | KGSL_CACHEMODE_MASK
3061 | KGSL_MEMTYPE_MASK
3062 | KGSL_MEMALIGN_MASK
3063 | KGSL_MEMFLAGS_USE_CPU_MAP
3064 | KGSL_MEMFLAGS_SECURE
3065 | KGSL_MEMFLAGS_FORCE_32BIT;
3066
3067 /* Turn off SVM if the system doesn't support it */
3068 if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
3069 flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
3070
3071 /* Return not supported error if secure memory isn't enabled */
3072 if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
3073 (flags & KGSL_MEMFLAGS_SECURE)) {
3074 dev_WARN_ONCE(dev_priv->device->dev, 1,
3075 "Secure memory not supported");
3076 return ERR_PTR(-EOPNOTSUPP);
3077 }
3078
3079 /* Secure memory disables advanced addressing modes */
3080 if (flags & KGSL_MEMFLAGS_SECURE)
3081 flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
3082
3083 /* Cap the alignment bits to the highest number we can handle */
3084 align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
3085 if (align >= ilog2(KGSL_MAX_ALIGN)) {
3086 KGSL_CORE_ERR("Alignment too large; restricting to %dK\n",
3087 KGSL_MAX_ALIGN >> 10);
3088
3089 flags &= ~((uint64_t) KGSL_MEMALIGN_MASK);
Lynus Vazeb7af682017-04-17 18:36:01 +05303090 flags |= (uint64_t)((ilog2(KGSL_MAX_ALIGN) <<
3091 KGSL_MEMALIGN_SHIFT) &
3092 KGSL_MEMALIGN_MASK);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003093 }
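	/*
	 * Example of the cap above (illustrative): a request encoding an
	 * alignment of 2^26 (64 MB) in KGSL_MEMALIGN exceeds
	 * ilog2(KGSL_MAX_ALIGN) = 25, so the field is rewritten to 25 and
	 * the object ends up 32 MB aligned instead.
	 */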
3094
3095 /* For now only allow allocations up to 4G */
3096 if (size == 0 || size > UINT_MAX)
3097 return ERR_PTR(-EINVAL);
3098
3099 flags = kgsl_filter_cachemode(flags);
3100
3101 entry = kgsl_mem_entry_create();
3102 if (entry == NULL)
3103 return ERR_PTR(-ENOMEM);
3104
3105 if (MMU_FEATURE(&dev_priv->device->mmu, KGSL_MMU_NEED_GUARD_PAGE))
3106 entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
3107
3108 if (flags & KGSL_MEMFLAGS_SECURE)
3109 entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
3110
3111 ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
3112 size, flags);
3113 if (ret != 0)
3114 goto err;
3115
3116 ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
3117 if (ret != 0) {
3118 kgsl_sharedmem_free(&entry->memdesc);
3119 goto err;
3120 }
3121
3122 kgsl_process_add_stats(private,
3123 kgsl_memdesc_usermem_type(&entry->memdesc),
3124 entry->memdesc.size);
3125 trace_kgsl_mem_alloc(entry);
3126
3127 kgsl_mem_entry_commit_process(entry);
3128 return entry;
3129err:
3130 kfree(entry);
3131 return ERR_PTR(ret);
3132}
3133
3134static void copy_metadata(struct kgsl_mem_entry *entry, uint64_t metadata,
3135 unsigned int len)
3136{
3137 unsigned int i, size;
3138
3139 if (len == 0)
3140 return;
3141
3142 size = min_t(unsigned int, len, sizeof(entry->metadata) - 1);
3143
3144 if (copy_from_user(entry->metadata, to_user_ptr(metadata), size)) {
3145 memset(entry->metadata, 0, sizeof(entry->metadata));
3146 return;
3147 }
3148
3149 /* Clean up non printable characters in the string */
3150 for (i = 0; i < size && entry->metadata[i] != 0; i++) {
3151 if (!isprint(entry->metadata[i]))
3152 entry->metadata[i] = '?';
3153 }
3154}
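/*
 * Example for copy_metadata(): the user string is truncated to
 * sizeof(entry->metadata) - 1 bytes and any non-printable byte is
 * replaced with '?', so a label such as "app:com.example\x01ui"
 * (illustrative) is stored as "app:com.example?ui"; a failed copy
 * leaves the metadata zeroed rather than partially filled.
 */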
3155
3156long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
3157 unsigned int cmd, void *data)
3158{
3159 struct kgsl_gpuobj_alloc *param = data;
3160 struct kgsl_mem_entry *entry;
3161
3162 entry = gpumem_alloc_entry(dev_priv, param->size, param->flags);
3163
3164 if (IS_ERR(entry))
3165 return PTR_ERR(entry);
3166
3167 copy_metadata(entry, param->metadata, param->metadata_len);
3168
3169 param->size = entry->memdesc.size;
3170 param->flags = entry->memdesc.flags;
3171 param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc);
3172 param->id = entry->id;
3173
Tarun Karra24d3fe12017-04-05 15:23:03 -07003174 /* Put the extra ref from kgsl_mem_entry_create() */
3175 kgsl_mem_entry_put(entry);
3176
Shrenuj Bansala419c792016-10-20 14:05:11 -07003177 return 0;
3178}
3179
3180long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
3181 unsigned int cmd, void *data)
3182{
3183 struct kgsl_gpumem_alloc *param = data;
3184 struct kgsl_mem_entry *entry;
3185 uint64_t flags = param->flags;
3186
3187 	/* Legacy functions don't support these advanced features */
3188 flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
3189 flags |= KGSL_MEMFLAGS_FORCE_32BIT;
3190
3191 entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);
3192
3193 if (IS_ERR(entry))
3194 return PTR_ERR(entry);
3195
3196 param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
3197 param->size = (size_t) entry->memdesc.size;
3198 param->flags = (unsigned int) entry->memdesc.flags;
3199
Tarun Karra24d3fe12017-04-05 15:23:03 -07003200 /* Put the extra ref from kgsl_mem_entry_create() */
3201 kgsl_mem_entry_put(entry);
3202
Shrenuj Bansala419c792016-10-20 14:05:11 -07003203 return 0;
3204}
3205
3206long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
3207 unsigned int cmd, void *data)
3208{
3209 struct kgsl_gpumem_alloc_id *param = data;
3210 struct kgsl_mem_entry *entry;
3211 uint64_t flags = param->flags;
3212
3213 flags |= KGSL_MEMFLAGS_FORCE_32BIT;
3214
3215 entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);
3216
3217 if (IS_ERR(entry))
3218 return PTR_ERR(entry);
3219
3220 param->id = entry->id;
3221 param->flags = (unsigned int) entry->memdesc.flags;
3222 param->size = (size_t) entry->memdesc.size;
3223 param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
3224 param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
3225
Tarun Karra24d3fe12017-04-05 15:23:03 -07003226 /* Put the extra ref from kgsl_mem_entry_create() */
3227 kgsl_mem_entry_put(entry);
3228
Shrenuj Bansala419c792016-10-20 14:05:11 -07003229 return 0;
3230}
3231
3232long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
3233 unsigned int cmd, void *data)
3234{
3235 struct kgsl_process_private *private = dev_priv->process_priv;
3236 struct kgsl_gpumem_get_info *param = data;
3237 struct kgsl_mem_entry *entry = NULL;
3238 int result = 0;
3239
3240 if (param->id != 0)
3241 entry = kgsl_sharedmem_find_id(private, param->id);
3242 else if (param->gpuaddr != 0)
3243 entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
3244
3245 if (entry == NULL)
3246 return -EINVAL;
3247
3248 /*
3249 * If any of the 64 bit address / sizes would end up being
3250 * truncated, return -ERANGE. That will signal the user that they
3251 * should use a more modern API
3252 */
3253 if (entry->memdesc.gpuaddr > ULONG_MAX)
3254 result = -ERANGE;
3255
3256 param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
3257 param->id = entry->id;
3258 param->flags = (unsigned int) entry->memdesc.flags;
3259 param->size = (size_t) entry->memdesc.size;
3260 param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
3261 param->useraddr = entry->memdesc.useraddr;
3262
3263 kgsl_mem_entry_put(entry);
3264 return result;
3265}
3266
3267static inline int _sparse_alloc_param_sanity_check(uint64_t size,
3268 uint64_t pagesize)
3269{
3270 if (size == 0 || pagesize == 0)
3271 return -EINVAL;
3272
3273 if (pagesize != PAGE_SIZE && pagesize != SZ_64K)
3274 return -EINVAL;
3275
3276 if (pagesize > size || !IS_ALIGNED(size, pagesize))
3277 return -EINVAL;
3278
3279 return 0;
3280}
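/*
 * Examples for the sanity check above: size = 1 MB with pagesize = 64 KB
 * passes; size = 100 KB with pagesize = 64 KB fails the IS_ALIGNED()
 * test; size = 4 KB with pagesize = 64 KB fails because the page size
 * exceeds the size; and any pagesize other than PAGE_SIZE or 64 KB is
 * rejected outright.
 */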
3281
3282long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
3283 unsigned int cmd, void *data)
3284{
3285 struct kgsl_process_private *process = dev_priv->process_priv;
3286 struct kgsl_sparse_phys_alloc *param = data;
3287 struct kgsl_mem_entry *entry;
3288 int ret;
3289 int id;
3290
3291 ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
3292 if (ret)
3293 return ret;
3294
3295 entry = kgsl_mem_entry_create();
3296 if (entry == NULL)
3297 return -ENOMEM;
3298
3299 ret = kgsl_process_private_get(process);
3300 if (!ret) {
3301 ret = -EBADF;
3302 goto err_free_entry;
3303 }
3304
3305 idr_preload(GFP_KERNEL);
3306 spin_lock(&process->mem_lock);
3307 /* Allocate the ID but don't attach the pointer just yet */
3308 id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
3309 spin_unlock(&process->mem_lock);
3310 idr_preload_end();
3311
3312 if (id < 0) {
3313 ret = id;
3314 goto err_put_proc_priv;
3315 }
3316
3317 entry->id = id;
3318 entry->priv = process;
3319
3320 entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
3321 kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
3322
3323 ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
3324 param->size, entry->memdesc.flags);
3325 if (ret)
3326 goto err_remove_idr;
3327
3328 /* Sanity check to verify we got correct pagesize */
3329 if (param->pagesize != PAGE_SIZE && entry->memdesc.sgt != NULL) {
3330 struct scatterlist *s;
3331 int i;
3332
3333 for_each_sg(entry->memdesc.sgt->sgl, s,
3334 entry->memdesc.sgt->nents, i) {
3335 if (!IS_ALIGNED(s->length, param->pagesize))
3336 goto err_invalid_pages;
3337 }
3338 }
3339
3340 param->id = entry->id;
3341 param->flags = entry->memdesc.flags;
3342
3343 trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
3344 kgsl_mem_entry_commit_process(entry);
3345
Tarun Karra24d3fe12017-04-05 15:23:03 -07003346 /* Put the extra ref from kgsl_mem_entry_create() */
3347 kgsl_mem_entry_put(entry);
3348
Shrenuj Bansala419c792016-10-20 14:05:11 -07003349 return 0;
3350
3351err_invalid_pages:
3352 kgsl_sharedmem_free(&entry->memdesc);
3353err_remove_idr:
3354 spin_lock(&process->mem_lock);
3355 idr_remove(&process->mem_idr, entry->id);
3356 spin_unlock(&process->mem_lock);
3357err_put_proc_priv:
3358 kgsl_process_private_put(process);
3359err_free_entry:
3360 kfree(entry);
3361
3362 return ret;
3363}
3364
3365long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
3366 unsigned int cmd, void *data)
3367{
3368 struct kgsl_process_private *process = dev_priv->process_priv;
3369 struct kgsl_sparse_phys_free *param = data;
3370 struct kgsl_mem_entry *entry;
3371
3372 entry = kgsl_sharedmem_find_id_flags(process, param->id,
3373 KGSL_MEMFLAGS_SPARSE_PHYS);
3374 if (entry == NULL)
3375 return -EINVAL;
3376
3377 if (entry->memdesc.cur_bindings != 0) {
3378 kgsl_mem_entry_put(entry);
3379 return -EINVAL;
3380 }
3381
3382 trace_sparse_phys_free(entry->id);
3383
3384 /* One put for find_id(), one put for the kgsl_mem_entry_create() */
3385 kgsl_mem_entry_put(entry);
Hareesh Gundu615439d2017-06-16 17:06:57 +05303386 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003387
3388 return 0;
3389}
3390
3391long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
3392 unsigned int cmd, void *data)
3393{
3394 struct kgsl_process_private *private = dev_priv->process_priv;
3395 struct kgsl_sparse_virt_alloc *param = data;
3396 struct kgsl_mem_entry *entry;
3397 int ret;
3398
3399 ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
3400 if (ret)
3401 return ret;
3402
3403 entry = kgsl_mem_entry_create();
3404 if (entry == NULL)
3405 return -ENOMEM;
3406
3407 entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
3408 entry->memdesc.size = param->size;
3409 entry->memdesc.cur_bindings = 0;
3410 kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
3411
3412 spin_lock_init(&entry->bind_lock);
3413 entry->bind_tree = RB_ROOT;
3414
3415 ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
3416 if (ret) {
3417 kfree(entry);
3418 return ret;
3419 }
3420
3421 param->id = entry->id;
3422 param->gpuaddr = entry->memdesc.gpuaddr;
3423 param->flags = entry->memdesc.flags;
3424
3425 trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
3426 kgsl_mem_entry_commit_process(entry);
3427
Tarun Karra24d3fe12017-04-05 15:23:03 -07003428 /* Put the extra ref from kgsl_mem_entry_create() */
3429 kgsl_mem_entry_put(entry);
3430
Shrenuj Bansala419c792016-10-20 14:05:11 -07003431 return 0;
3432}
3433
3434long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
3435 unsigned int cmd, void *data)
3436{
3437 struct kgsl_process_private *process = dev_priv->process_priv;
3438 struct kgsl_sparse_virt_free *param = data;
3439 struct kgsl_mem_entry *entry = NULL;
3440
3441 entry = kgsl_sharedmem_find_id_flags(process, param->id,
3442 KGSL_MEMFLAGS_SPARSE_VIRT);
3443 if (entry == NULL)
3444 return -EINVAL;
3445
3446 if (entry->bind_tree.rb_node != NULL) {
3447 kgsl_mem_entry_put(entry);
3448 return -EINVAL;
3449 }
3450
3451 trace_sparse_virt_free(entry->id);
3452
3453 /* One put for find_id(), one put for the kgsl_mem_entry_create() */
3454 kgsl_mem_entry_put(entry);
Hareesh Gundu615439d2017-06-16 17:06:57 +05303455 kgsl_mem_entry_put(entry);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003456
3457 return 0;
3458}
3459
3460static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
3461 uint64_t v_offset,
3462 struct kgsl_memdesc *memdesc,
3463 uint64_t p_offset,
3464 uint64_t size,
3465 uint64_t flags)
3466{
3467 struct sparse_bind_object *new;
3468 struct rb_node **node, *parent = NULL;
3469
3470 new = kzalloc(sizeof(*new), GFP_KERNEL);
3471 if (new == NULL)
3472 return -ENOMEM;
3473
3474 new->v_off = v_offset;
3475 new->p_off = p_offset;
3476 new->p_memdesc = memdesc;
3477 new->size = size;
3478 new->flags = flags;
3479
3480 node = &entry->bind_tree.rb_node;
3481
3482 while (*node != NULL) {
3483 struct sparse_bind_object *this;
3484
3485 parent = *node;
3486 this = rb_entry(parent, struct sparse_bind_object, node);
3487
3488 if (new->v_off < this->v_off)
3489 node = &parent->rb_left;
3490 else if (new->v_off > this->v_off)
3491 node = &parent->rb_right;
3492 }
3493
3494 rb_link_node(&new->node, parent, node);
3495 rb_insert_color(&new->node, &entry->bind_tree);
3496
3497 return 0;
3498}
3499
3500static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
3501 struct sparse_bind_object *obj,
3502 uint64_t v_offset, uint64_t size)
3503{
3504 spin_lock(&entry->bind_lock);
3505 if (v_offset == obj->v_off && size >= obj->size) {
3506 /*
3507 		 * The range covers the whole object; remove the entry
3508 		 * and free it up
3509 */
3510 rb_erase(&obj->node, &entry->bind_tree);
3511 kfree(obj);
3512 } else if (v_offset == obj->v_off) {
3513 /*
3514 		 * The range starts at the front of the node; trim the
3515 		 * front of the node
3516 */
3517 obj->v_off += size;
3518 obj->p_off += size;
3519 obj->size -= size;
3520 } else if ((v_offset + size) == (obj->v_off + obj->size)) {
3521 /*
3522 		 * The range ends at the end of the object; trim the
3523 		 * object's size
3524 */
3525 obj->size -= size;
3526 } else {
3527 /*
3528 * We are in the middle of a node, split it up and
3529 * create a new mini node. Adjust this node's bounds
3530 * and add the new node to the list.
3531 */
3532 uint64_t tmp_size = obj->size;
3533 int ret;
3534
3535 obj->size = v_offset - obj->v_off;
3536
3537 spin_unlock(&entry->bind_lock);
3538 ret = _sparse_add_to_bind_tree(entry, v_offset + size,
3539 obj->p_memdesc,
3540 obj->p_off + (v_offset - obj->v_off) + size,
3541 tmp_size - (v_offset - obj->v_off) - size,
3542 obj->flags);
3543
3544 return ret;
3545 }
3546
3547 spin_unlock(&entry->bind_lock);
3548
3549 return 0;
3550}
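/*
 * Worked example of the middle-split case above (illustrative offsets):
 * a bind object with v_off = 0x10000, p_off = 0x40000, size = 0x40000
 * from which [0x20000, 0x30000) is removed shrinks in place to
 * size = 0x10000, and a new node is inserted with v_off = 0x30000,
 * p_off = 0x60000, size = 0x20000, so the two survivors exactly cover
 * what remains bound outside the removed range.
 */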
3551
3552static struct sparse_bind_object *_find_containing_bind_obj(
3553 struct kgsl_mem_entry *entry,
3554 uint64_t offset, uint64_t size)
3555{
3556 struct sparse_bind_object *obj = NULL;
3557 struct rb_node *node = entry->bind_tree.rb_node;
3558
3559 spin_lock(&entry->bind_lock);
3560
3561 while (node != NULL) {
3562 obj = rb_entry(node, struct sparse_bind_object, node);
3563
3564 if (offset == obj->v_off) {
3565 break;
3566 } else if (offset < obj->v_off) {
3567 if (offset + size > obj->v_off)
3568 break;
3569 node = node->rb_left;
3570 obj = NULL;
3571 } else if (offset > obj->v_off) {
3572 if (offset < obj->v_off + obj->size)
3573 break;
3574 node = node->rb_right;
3575 obj = NULL;
3576 }
3577 }
3578
3579 spin_unlock(&entry->bind_lock);
3580
3581 return obj;
3582}
3583
3584static int _sparse_unbind(struct kgsl_mem_entry *entry,
3585 struct sparse_bind_object *bind_obj,
3586 uint64_t offset, uint64_t size)
3587{
3588 struct kgsl_memdesc *memdesc = bind_obj->p_memdesc;
3589 struct kgsl_pagetable *pt = memdesc->pagetable;
3590 int ret;
3591
3592 if (memdesc->cur_bindings < (size / PAGE_SIZE))
3593 return -EINVAL;
3594
3595 memdesc->cur_bindings -= size / PAGE_SIZE;
3596
3597 ret = kgsl_mmu_unmap_offset(pt, memdesc,
3598 entry->memdesc.gpuaddr, offset, size);
3599 if (ret)
3600 return ret;
3601
3602 ret = kgsl_mmu_sparse_dummy_map(pt, &entry->memdesc, offset, size);
3603 if (ret)
3604 return ret;
3605
3606 ret = _sparse_rm_from_bind_tree(entry, bind_obj, offset, size);
3607 if (ret == 0) {
3608 atomic_long_sub(size, &kgsl_driver.stats.mapped);
3609 trace_sparse_unbind(entry->id, offset, size);
3610 }
3611
3612 return ret;
3613}
3614
3615static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
3616 struct kgsl_mem_entry *virt_entry)
3617{
3618 struct sparse_bind_object *bind_obj;
3619 int ret = 0;
3620 uint64_t size = obj->size;
3621 uint64_t tmp_size = obj->size;
3622 uint64_t offset = obj->virtoffset;
3623
3624 while (size > 0 && ret == 0) {
3625 tmp_size = size;
3626 bind_obj = _find_containing_bind_obj(virt_entry, offset, size);
3627 if (bind_obj == NULL)
3628 return 0;
3629
3630 if (bind_obj->v_off > offset) {
3631 			tmp_size = size - (bind_obj->v_off - offset);
3632 if (tmp_size > bind_obj->size)
3633 tmp_size = bind_obj->size;
3634 offset = bind_obj->v_off;
3635 } else if (bind_obj->v_off < offset) {
3636 uint64_t diff = offset - bind_obj->v_off;
3637
3638 if (diff + size > bind_obj->size)
3639 tmp_size = bind_obj->size - diff;
3640 } else {
3641 if (tmp_size > bind_obj->size)
3642 tmp_size = bind_obj->size;
3643 }
3644
3645 ret = _sparse_unbind(virt_entry, bind_obj, offset, tmp_size);
3646 if (ret == 0) {
3647 offset += tmp_size;
3648 size -= tmp_size;
3649 }
3650 }
3651
3652 return ret;
3653}
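/*
 * Example of the partial-overlap handling above (illustrative offsets):
 * unbinding offset = 0, size = 0x30000 when the first containing bind
 * object starts at v_off = 0x10000 clips the first pass to
 * tmp_size = size - (v_off - offset) = 0x20000 starting at 0x10000; the
 * loop then advances offset, shrinks size, and repeats until either the
 * requested range is exhausted or no covering object remains.
 */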
3654
3655static inline bool _is_phys_bindable(struct kgsl_mem_entry *phys_entry,
3656 uint64_t offset, uint64_t size, uint64_t flags)
3657{
3658 struct kgsl_memdesc *memdesc = &phys_entry->memdesc;
3659
3660 if (!IS_ALIGNED(offset | size, kgsl_memdesc_get_pagesize(memdesc)))
3661 return false;
3662
Sudeep Yedalapure8ff97992017-01-20 20:12:51 +05303663 if (offset + size < offset)
3664 return false;
3665
Shrenuj Bansala419c792016-10-20 14:05:11 -07003666 if (!(flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
3667 offset + size > memdesc->size)
3668 return false;
3669
3670 return true;
3671}
3672
3673static int _sparse_bind(struct kgsl_process_private *process,
3674 struct kgsl_mem_entry *virt_entry, uint64_t v_offset,
3675 struct kgsl_mem_entry *phys_entry, uint64_t p_offset,
3676 uint64_t size, uint64_t flags)
3677{
3678 int ret;
3679 struct kgsl_pagetable *pagetable;
3680 struct kgsl_memdesc *memdesc = &phys_entry->memdesc;
3681
3682 	/* The physical entry must not already have a GPU address or a CPU mapping */
3683 if (memdesc->gpuaddr)
3684 return -EINVAL;
3685
3686 if (memdesc->useraddr != 0)
3687 return -EINVAL;
3688
3689 pagetable = memdesc->pagetable;
3690
3691 /* Clear out any mappings */
3692 ret = kgsl_mmu_unmap_offset(pagetable, &virt_entry->memdesc,
3693 virt_entry->memdesc.gpuaddr, v_offset, size);
3694 if (ret)
3695 return ret;
3696
3697 ret = kgsl_mmu_map_offset(pagetable, virt_entry->memdesc.gpuaddr,
3698 v_offset, memdesc, p_offset, size, flags);
3699 if (ret) {
3700 /* Try to clean up, but not the end of the world */
3701 kgsl_mmu_sparse_dummy_map(pagetable, &virt_entry->memdesc,
3702 v_offset, size);
3703 return ret;
3704 }
3705
3706 ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc,
3707 p_offset, size, flags);
3708 if (ret == 0)
3709 memdesc->cur_bindings += size / PAGE_SIZE;
3710
3711 return ret;
3712}
3713
3714static long sparse_bind_range(struct kgsl_process_private *private,
3715 struct kgsl_sparse_binding_object *obj,
3716 struct kgsl_mem_entry *virt_entry)
3717{
3718 struct kgsl_mem_entry *phys_entry;
3719 int ret;
3720
3721 phys_entry = kgsl_sharedmem_find_id_flags(private, obj->id,
3722 KGSL_MEMFLAGS_SPARSE_PHYS);
3723 if (phys_entry == NULL)
3724 return -EINVAL;
3725
3726 if (!_is_phys_bindable(phys_entry, obj->physoffset, obj->size,
3727 obj->flags)) {
3728 kgsl_mem_entry_put(phys_entry);
3729 return -EINVAL;
3730 }
3731
3732 if (kgsl_memdesc_get_align(&virt_entry->memdesc) !=
3733 kgsl_memdesc_get_align(&phys_entry->memdesc)) {
3734 kgsl_mem_entry_put(phys_entry);
3735 return -EINVAL;
3736 }
3737
3738 ret = sparse_unbind_range(obj, virt_entry);
3739 if (ret) {
3740 kgsl_mem_entry_put(phys_entry);
3741 return -EINVAL;
3742 }
3743
3744 ret = _sparse_bind(private, virt_entry, obj->virtoffset,
3745 phys_entry, obj->physoffset, obj->size,
3746 obj->flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS);
3747 if (ret == 0) {
3748 KGSL_STATS_ADD(obj->size, &kgsl_driver.stats.mapped,
3749 &kgsl_driver.stats.mapped_max);
3750
3751 trace_sparse_bind(virt_entry->id, obj->virtoffset,
3752 phys_entry->id, obj->physoffset,
3753 obj->size, obj->flags);
3754 }
3755
3756 kgsl_mem_entry_put(phys_entry);
3757
3758 return ret;
3759}
3760
3761long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
3762 unsigned int cmd, void *data)
3763{
3764 struct kgsl_process_private *private = dev_priv->process_priv;
3765 struct kgsl_sparse_bind *param = data;
3766 struct kgsl_sparse_binding_object obj;
3767 struct kgsl_mem_entry *virt_entry;
3768 int pg_sz;
3769 void __user *ptr;
3770 int ret = 0;
3771 int i = 0;
3772
3773 ptr = (void __user *) (uintptr_t) param->list;
3774
3775 if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
3776 param->count == 0 || ptr == NULL)
3777 return -EINVAL;
3778
3779 virt_entry = kgsl_sharedmem_find_id_flags(private, param->id,
3780 KGSL_MEMFLAGS_SPARSE_VIRT);
3781 if (virt_entry == NULL)
3782 return -EINVAL;
3783
3784 pg_sz = kgsl_memdesc_get_pagesize(&virt_entry->memdesc);
3785
3786 for (i = 0; i < param->count; i++) {
3787 memset(&obj, 0, sizeof(obj));
3788 ret = _copy_from_user(&obj, ptr, sizeof(obj), param->size);
3789 if (ret)
3790 break;
3791
3792 /* Sanity check initial range */
Sudeep Yedalapure8ff97992017-01-20 20:12:51 +05303793 if (obj.size == 0 || obj.virtoffset + obj.size < obj.size ||
Shrenuj Bansala419c792016-10-20 14:05:11 -07003794 obj.virtoffset + obj.size > virt_entry->memdesc.size ||
3795 !(IS_ALIGNED(obj.virtoffset | obj.size, pg_sz))) {
3796 ret = -EINVAL;
3797 break;
3798 }
3799
3800 if (obj.flags & KGSL_SPARSE_BIND)
3801 ret = sparse_bind_range(private, &obj, virt_entry);
3802 else if (obj.flags & KGSL_SPARSE_UNBIND)
3803 ret = sparse_unbind_range(&obj, virt_entry);
3804 else
3805 ret = -EINVAL;
3806 if (ret)
3807 break;
3808
3809 ptr += sizeof(obj);
3810 }
3811
3812 kgsl_mem_entry_put(virt_entry);
3813
3814 return ret;
3815}
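/*
 * Illustrative userspace sketch for the ioctl above (field names taken
 * from the structures used in this file; the ioctl command name is
 * assumed from the msm_kgsl UAPI header, error handling omitted).
 * Binding 64 KB of physical allocation phys_id at virtual offset 128 KB
 * of virtual allocation virt_id:
 *
 *	struct kgsl_sparse_binding_object obj = {
 *		.id = phys_id,
 *		.virtoffset = 0x20000,
 *		.physoffset = 0,
 *		.size = 0x10000,
 *		.flags = KGSL_SPARSE_BIND,
 *	};
 *	struct kgsl_sparse_bind bind = {
 *		.list = (uintptr_t) &obj,
 *		.id = virt_id,
 *		.size = sizeof(obj),
 *		.count = 1,
 *	};
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_SPARSE_BIND, &bind);
 */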
3816
Tarun Karra2b8b3632016-11-14 16:38:27 -08003817long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
3818 unsigned int cmd, void *data)
3819{
3820 struct kgsl_gpu_sparse_command *param = data;
3821 struct kgsl_device *device = dev_priv->device;
3822 struct kgsl_context *context;
3823 struct kgsl_drawobj *drawobj[2];
3824 struct kgsl_drawobj_sparse *sparseobj;
3825 long result;
3826 unsigned int i = 0;
3827
3828 	/* Make sure the sparse and syncpoint counts aren't too big */
3829 if (param->numsparse > KGSL_MAX_SPARSE ||
3830 param->numsyncs > KGSL_MAX_SYNCPOINTS)
3831 return -EINVAL;
3832
3833 	/* Make sure there is at least one sparse or sync command */
3834 if (param->numsparse == 0 && param->numsyncs == 0)
3835 return -EINVAL;
3836
3837 /* Only Sparse commands are supported in this ioctl */
3838 if (!(param->flags & KGSL_DRAWOBJ_SPARSE) || (param->flags &
3839 (KGSL_DRAWOBJ_SUBMIT_IB_LIST | KGSL_DRAWOBJ_MARKER
3840 | KGSL_DRAWOBJ_SYNC)))
3841 return -EINVAL;
3842
3843 context = kgsl_context_get_owner(dev_priv, param->context_id);
3844 if (context == NULL)
3845 return -EINVAL;
3846
3847 /* Restrict bind commands to bind context */
3848 if (!(context->flags & KGSL_CONTEXT_SPARSE)) {
3849 kgsl_context_put(context);
3850 return -EINVAL;
3851 }
3852
3853 if (param->numsyncs) {
3854 struct kgsl_drawobj_sync *syncobj = kgsl_drawobj_sync_create(
3855 device, context);
3856 if (IS_ERR(syncobj)) {
3857 result = PTR_ERR(syncobj);
3858 goto done;
3859 }
3860
3861 drawobj[i++] = DRAWOBJ(syncobj);
3862 result = kgsl_drawobj_sync_add_synclist(device, syncobj,
3863 to_user_ptr(param->synclist),
3864 param->syncsize, param->numsyncs);
3865 if (result)
3866 goto done;
3867 }
3868
3869 if (param->numsparse) {
3870 sparseobj = kgsl_drawobj_sparse_create(device, context,
3871 param->flags);
3872 if (IS_ERR(sparseobj)) {
3873 result = PTR_ERR(sparseobj);
3874 goto done;
3875 }
3876
3877 sparseobj->id = param->id;
3878 drawobj[i++] = DRAWOBJ(sparseobj);
3879 result = kgsl_drawobj_sparse_add_sparselist(device, sparseobj,
3880 param->id, to_user_ptr(param->sparselist),
3881 param->sparsesize, param->numsparse);
3882 if (result)
3883 goto done;
3884 }
3885
3886 result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
3887 drawobj, i, &param->timestamp);
3888
3889done:
3890 /*
3891 * -EPROTO is a "success" error - it just tells the user that the
3892 * context had previously faulted
3893 */
3894 if (result && result != -EPROTO)
3895 while (i--)
3896 kgsl_drawobj_destroy(drawobj[i]);
3897
3898 kgsl_context_put(context);
3899 return result;
3900}
3901
3902void kgsl_sparse_bind(struct kgsl_process_private *private,
3903 struct kgsl_drawobj_sparse *sparseobj)
3904{
3905 struct kgsl_sparseobj_node *sparse_node;
3906 struct kgsl_mem_entry *virt_entry = NULL;
3907 long ret = 0;
3908 char *name;
3909
3910 virt_entry = kgsl_sharedmem_find_id_flags(private, sparseobj->id,
3911 KGSL_MEMFLAGS_SPARSE_VIRT);
3912 if (virt_entry == NULL)
3913 return;
3914
3915 list_for_each_entry(sparse_node, &sparseobj->sparselist, node) {
3916 if (sparse_node->obj.flags & KGSL_SPARSE_BIND) {
3917 ret = sparse_bind_range(private, &sparse_node->obj,
3918 virt_entry);
3919 name = "bind";
3920 } else {
3921 ret = sparse_unbind_range(&sparse_node->obj,
3922 virt_entry);
3923 name = "unbind";
3924 }
3925
3926 if (ret)
3927 KGSL_CORE_ERR("kgsl: Unable to '%s' ret %ld virt_id %d, phys_id %d, virt_offset %16.16llX, phys_offset %16.16llX, size %16.16llX, flags %16.16llX\n",
3928 name, ret, sparse_node->virt_id,
3929 sparse_node->obj.id,
3930 sparse_node->obj.virtoffset,
3931 sparse_node->obj.physoffset,
3932 sparse_node->obj.size, sparse_node->obj.flags);
3933 }
3934
3935 kgsl_mem_entry_put(virt_entry);
3936}
3937EXPORT_SYMBOL(kgsl_sparse_bind);
3938
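/**
 * kgsl_ioctl_gpuobj_info - Return information about a GPU memory object
 * @dev_priv - pointer to the private device structure
 * @cmd - the ioctl cmd passed from kgsl_ioctl
 * @data - the user data buffer from kgsl_ioctl
 *
 * Copies the GPU address, flags, size, VA footprint and user address of the
 * object identified by param->id back to the caller. Returns 0 on success or
 * -EINVAL if the id is invalid.
 *
 * Userspace sketch (assumes the msm_kgsl.h uapi definitions):
 *
 *	struct kgsl_gpuobj_info info = { .id = id };
 *
 *	if (ioctl(fd, IOCTL_KGSL_GPUOBJ_INFO, &info) == 0)
 *		printf("gpuaddr 0x%llx size 0x%llx\n", info.gpuaddr, info.size);
 */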
Shrenuj Bansala419c792016-10-20 14:05:11 -07003939long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
3940 unsigned int cmd, void *data)
3941{
3942 struct kgsl_process_private *private = dev_priv->process_priv;
3943 struct kgsl_gpuobj_info *param = data;
3944 struct kgsl_mem_entry *entry;
3945
3946 if (param->id == 0)
3947 return -EINVAL;
3948
3949 entry = kgsl_sharedmem_find_id(private, param->id);
3950 if (entry == NULL)
3951 return -EINVAL;
3952
3953 param->id = entry->id;
3954 param->gpuaddr = entry->memdesc.gpuaddr;
3955 param->flags = entry->memdesc.flags;
3956 param->size = entry->memdesc.size;
3957 param->va_len = kgsl_memdesc_footprint(&entry->memdesc);
3958 param->va_addr = (uint64_t) entry->memdesc.useraddr;
3959
3960 kgsl_mem_entry_put(entry);
3961 return 0;
3962}
3963
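/**
 * kgsl_ioctl_gpuobj_set_info - Update metadata and/or type of a GPU object
 * @dev_priv - pointer to the private device structure
 * @cmd - the ioctl cmd passed from kgsl_ioctl
 * @data - the user data buffer from kgsl_ioctl
 */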
3964long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
3965 unsigned int cmd, void *data)
3966{
3967 struct kgsl_process_private *private = dev_priv->process_priv;
3968 struct kgsl_gpuobj_set_info *param = data;
3969 struct kgsl_mem_entry *entry;
3970
3971 if (param->id == 0)
3972 return -EINVAL;
3973
3974 entry = kgsl_sharedmem_find_id(private, param->id);
3975 if (entry == NULL)
3976 return -EINVAL;
3977
3978 if (param->flags & KGSL_GPUOBJ_SET_INFO_METADATA)
3979 copy_metadata(entry, param->metadata, param->metadata_len);
3980
3981 if (param->flags & KGSL_GPUOBJ_SET_INFO_TYPE) {
3982 entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
Lynus Vazeb7af682017-04-17 18:36:01 +05303983 entry->memdesc.flags |= (uint64_t)(param->type <<
3984 KGSL_MEMTYPE_SHIFT);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003985 }
3986
3987 kgsl_mem_entry_put(entry);
3988 return 0;
3989}
3990
3991/**
3992 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
3993 * @dev_priv - pointer to the private device structure
3994 * @cmd - the ioctl cmd passed from kgsl_ioctl
3995 * @data - the user data buffer from kgsl_ioctl
3996 * @returns 0 on success or error code on failure
3997 */
3998
3999long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
4000 unsigned int cmd, void *data)
4001{
4002 struct kgsl_timestamp_event *param = data;
4003 int ret;
4004
4005 switch (param->type) {
4006 case KGSL_TIMESTAMP_EVENT_FENCE:
4007 ret = kgsl_add_fence_event(dev_priv->device,
4008 param->context_id, param->timestamp, param->priv,
4009 param->len, dev_priv);
4010 break;
4011 default:
4012 ret = -EINVAL;
4013 }
4014
4015 return ret;
4016}
4017
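/*
 * kgsl_mmap_memstore - Map the global memstore into a user process. The
 * memstore may only be mapped read-only, at its exact size, and is always
 * mapped write-combined.
 */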
4018static int
4019kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
4020{
4021 struct kgsl_memdesc *memdesc = &device->memstore;
4022 int result;
4023 unsigned int vma_size = vma->vm_end - vma->vm_start;
4024
4025 /* The memstore can only be mapped as read only */
4026
4027 if (vma->vm_flags & VM_WRITE)
4028 return -EPERM;
4029
4030 if (memdesc->size != vma_size) {
4031 KGSL_MEM_ERR(device, "memstore bad size: %d should be %llu\n",
4032 vma_size, memdesc->size);
4033 return -EINVAL;
4034 }
4035
4036 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
4037
4038 result = remap_pfn_range(vma, vma->vm_start,
4039 device->memstore.physaddr >> PAGE_SHIFT,
4040 vma_size, vma->vm_page_prot);
4041 if (result != 0)
4042 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
4043 result);
4044
4045 return result;
4046}
4047
4048/*
4049 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
4050 * Increase the refcount to make sure that the accounting stays correct
4051 */
4052
4053static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
4054{
4055 struct kgsl_mem_entry *entry = vma->vm_private_data;
4056
4057 if (kgsl_mem_entry_get(entry) == 0)
4058 vma->vm_private_data = NULL;
4059}
4060
4061static int
4062kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4063{
4064 struct kgsl_mem_entry *entry = vma->vm_private_data;
4065
4066 if (!entry)
4067 return VM_FAULT_SIGBUS;
4068 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
4069 return VM_FAULT_SIGBUS;
4070
4071 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
4072}
4073
4074static void
4075kgsl_gpumem_vm_close(struct vm_area_struct *vma)
4076{
4077 struct kgsl_mem_entry *entry = vma->vm_private_data;
4078
4079 if (!entry)
4080 return;
4081
4082 entry->memdesc.useraddr = 0;
4083 kgsl_mem_entry_put(entry);
4084}
4085
4086static const struct vm_operations_struct kgsl_gpumem_vm_ops = {
4087 .open = kgsl_gpumem_vm_open,
4088 .fault = kgsl_gpumem_vm_fault,
4089 .close = kgsl_gpumem_vm_close,
4090};
4091
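/*
 * get_mmap_entry - Find and validate the memory entry backing an mmap
 * request. The entry is looked up by id (or by GPU address), must support
 * CPU faults, must not already be mapped, and the requested length must
 * match the entry footprint/size. On success a reference is returned in
 * out_entry that the caller must release.
 */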
4092static int
4093get_mmap_entry(struct kgsl_process_private *private,
4094 struct kgsl_mem_entry **out_entry, unsigned long pgoff,
4095 unsigned long len)
4096{
4097 int ret = 0;
4098 struct kgsl_mem_entry *entry;
4099
4100 entry = kgsl_sharedmem_find_id(private, pgoff);
4101 if (entry == NULL)
4102 entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
4103
4104 if (!entry)
4105 return -EINVAL;
4106
4107 if (!entry->memdesc.ops ||
4108 !entry->memdesc.ops->vmflags ||
4109 !entry->memdesc.ops->vmfault) {
4110 ret = -EINVAL;
4111 goto err_put;
4112 }
4113
4114 if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_PHYS) {
4115 if (len != entry->memdesc.size) {
4116 ret = -EINVAL;
4117 goto err_put;
4118 }
4119 }
4120
4121 if (entry->memdesc.useraddr != 0) {
4122 ret = -EBUSY;
4123 goto err_put;
4124 }
4125
4126 if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
4127 if (len != kgsl_memdesc_footprint(&entry->memdesc)) {
4128 ret = -ERANGE;
4129 goto err_put;
4130 }
4131 } else if (len != kgsl_memdesc_footprint(&entry->memdesc) &&
4132 len != entry->memdesc.size) {
4133 /*
4134		 * If cpu_map != gpu_map then the user can map either the
4135		 * footprint or the entry size
4136 */
4137 ret = -ERANGE;
4138 goto err_put;
4139 }
4140
4141 *out_entry = entry;
4142 return 0;
4143err_put:
4144 kgsl_mem_entry_put(entry);
4145 return ret;
4146}
4147
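/*
 * _gpu_set_svm_region - Claim the SVM range [addr, addr + size) in the
 * process pagetable and map the entry there, so the GPU address matches the
 * CPU address. Returns the address on success or a negative error code.
 */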
4148static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
4149 struct kgsl_mem_entry *entry, unsigned long addr,
4150 unsigned long size)
4151{
4152 int ret;
4153
4154 ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
4155 (uint64_t) size);
4156
4157 if (ret != 0)
4158 return ret;
4159
4160 entry->memdesc.gpuaddr = (uint64_t) addr;
4161 entry->memdesc.pagetable = private->pagetable;
4162
4163 ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
4164 if (ret) {
4165 kgsl_mmu_put_gpuaddr(&entry->memdesc);
4166 return ret;
4167 }
4168
4169 kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
4170 entry->memdesc.size);
4171
4172 return addr;
4173}
4174
4175static unsigned long _gpu_find_svm(struct kgsl_process_private *private,
4176 unsigned long start, unsigned long end, unsigned long len,
4177 unsigned int align)
4178{
4179 uint64_t addr = kgsl_mmu_find_svm_region(private->pagetable,
4180 (uint64_t) start, (uint64_t)end, (uint64_t) len, align);
4181
4182 BUG_ON(!IS_ERR_VALUE((unsigned long)addr) && (addr > ULONG_MAX));
4183
4184 return (unsigned long) addr;
4185}
4186
4187/* Search top down in the CPU VM region for a free address */
4188static unsigned long _cpu_get_unmapped_area(unsigned long bottom,
4189 unsigned long top, unsigned long len, unsigned long align)
4190{
4191 struct vm_unmapped_area_info info;
4192 unsigned long addr, err;
4193
4194 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
4195 info.low_limit = bottom;
4196 info.high_limit = top;
4197 info.length = len;
4198 info.align_offset = 0;
4199 info.align_mask = align - 1;
4200
4201 addr = vm_unmapped_area(&info);
4202
4203 if (IS_ERR_VALUE(addr))
4204 return addr;
4205
4206 err = security_mmap_addr(addr);
4207 return err ? err : addr;
4208}
4209
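/*
 * _search_range - Walk downward through [start, end) looking for a region of
 * length len that is free on both the CPU and the GPU, alternating between
 * CPU and GPU searches until one address works or the range is exhausted.
 */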
4210static unsigned long _search_range(struct kgsl_process_private *private,
4211 struct kgsl_mem_entry *entry,
4212 unsigned long start, unsigned long end,
4213 unsigned long len, uint64_t align)
4214{
4215 unsigned long cpu, gpu = end, result = -ENOMEM;
4216
4217 while (gpu > start) {
4218 /* find a new empty spot on the CPU below the last one */
4219 cpu = _cpu_get_unmapped_area(start, gpu, len,
4220 (unsigned long) align);
4221 if (IS_ERR_VALUE(cpu)) {
4222 result = cpu;
4223 break;
4224 }
4225 /* try to map it on the GPU */
4226 result = _gpu_set_svm_region(private, entry, cpu, len);
4227 if (!IS_ERR_VALUE(result))
4228 break;
4229
4230 trace_kgsl_mem_unmapped_area_collision(entry, cpu, len);
4231
4232 if (cpu <= start) {
4233 result = -ENOMEM;
4234 break;
4235 }
4236
4237 /* move downward to the next empty spot on the GPU */
4238 gpu = _gpu_find_svm(private, start, cpu, len, align);
4239 if (IS_ERR_VALUE(gpu)) {
4240 result = gpu;
4241 break;
4242 }
4243
4244		/* Check that _gpu_find_svm doesn't put us in a loop */
4245 if (gpu >= cpu) {
4246 result = -ENOMEM;
4247 break;
4248 }
4249
4250 /* Break if the recommended GPU address is out of range */
4251 if (gpu < start) {
4252 result = -ENOMEM;
4253 break;
4254 }
4255
4256 /*
4257 * Add the length of the chunk to the GPU address to yield the
4258 * upper bound for the CPU search
4259 */
4260 gpu += len;
4261 }
4262 return result;
4263}
4264
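/*
 * _get_svm_area - Choose a shared CPU/GPU virtual address for an entry. The
 * pagetable's SVM range is clamped to the CPU mmap limits, the caller's hint
 * (or MAP_FIXED address) is tried first, then the range below the hint is
 * searched and finally, if needed, the range above it.
 */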
4265static unsigned long _get_svm_area(struct kgsl_process_private *private,
4266 struct kgsl_mem_entry *entry, unsigned long hint,
4267 unsigned long len, unsigned long flags)
4268{
4269 uint64_t start, end;
4270 int align_shift = kgsl_memdesc_get_align(&entry->memdesc);
4271 uint64_t align;
4272 unsigned long result;
4273 unsigned long addr;
4274
4275 if (align_shift >= ilog2(SZ_2M))
4276 align = SZ_2M;
4277 else if (align_shift >= ilog2(SZ_1M))
4278 align = SZ_1M;
4279 else if (align_shift >= ilog2(SZ_64K))
4280 align = SZ_64K;
4281 else
4282 align = SZ_4K;
4283
4284 /* get the GPU pagetable's SVM range */
4285 if (kgsl_mmu_svm_range(private->pagetable, &start, &end,
4286 entry->memdesc.flags))
4287 return -ERANGE;
4288
4289 /* now clamp the range based on the CPU's requirements */
4290 start = max_t(uint64_t, start, mmap_min_addr);
4291 end = min_t(uint64_t, end, current->mm->mmap_base);
4292 if (start >= end)
4293 return -ERANGE;
4294
4295 if (flags & MAP_FIXED) {
4296 /* we must use addr 'hint' or fail */
4297 return _gpu_set_svm_region(private, entry, hint, len);
4298 } else if (hint != 0) {
4299 struct vm_area_struct *vma;
4300
4301 /*
4302 * See if the hint is usable, if not we will use
4303 * it as the start point for searching.
4304 */
4305 addr = clamp_t(unsigned long, hint & ~(align - 1),
4306 start, (end - len) & ~(align - 1));
4307
4308 vma = find_vma(current->mm, addr);
4309
4310 if (vma == NULL || ((addr + len) <= vma->vm_start)) {
4311 result = _gpu_set_svm_region(private, entry, addr, len);
4312
4313 /* On failure drop down to keep searching */
4314 if (!IS_ERR_VALUE(result))
4315 return result;
4316 }
4317 } else {
4318 /* no hint, start search at the top and work down */
4319 addr = end & ~(align - 1);
4320 }
4321
4322 /*
4323 * Search downwards from the hint first. If that fails we
4324 * must try to search above it.
4325 */
4326 result = _search_range(private, entry, start, addr, len, align);
4327 if (IS_ERR_VALUE(result) && hint != 0)
4328 result = _search_range(private, entry, addr, end, len, align);
4329
4330 return result;
4331}
4332
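/*
 * kgsl_get_unmapped_area - file_operations hook that picks the user address
 * for an mmap. The memstore and non cpu_map entries use the generic
 * get_unmapped_area(); cpu_map (SVM) entries go through the shared CPU/GPU
 * address search. Secure buffers may not be mapped by the CPU.
 */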
4333static unsigned long
4334kgsl_get_unmapped_area(struct file *file, unsigned long addr,
4335 unsigned long len, unsigned long pgoff,
4336 unsigned long flags)
4337{
4338 unsigned long val;
4339 unsigned long vma_offset = pgoff << PAGE_SHIFT;
4340 struct kgsl_device_private *dev_priv = file->private_data;
4341 struct kgsl_process_private *private = dev_priv->process_priv;
4342 struct kgsl_device *device = dev_priv->device;
4343 struct kgsl_mem_entry *entry = NULL;
4344
4345 if (vma_offset == (unsigned long) device->memstore.gpuaddr)
4346 return get_unmapped_area(NULL, addr, len, pgoff, flags);
4347
4348 val = get_mmap_entry(private, &entry, pgoff, len);
4349 if (val)
4350 return val;
4351
4352 /* Do not allow CPU mappings for secure buffers */
4353 if (kgsl_memdesc_is_secured(&entry->memdesc)) {
4354 val = -EPERM;
4355 goto put;
4356 }
4357
4358 if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
4359 val = get_unmapped_area(NULL, addr, len, 0, flags);
4360 if (IS_ERR_VALUE(val))
4361 KGSL_MEM_ERR(device,
4362 "get_unmapped_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
4363 private->pid, addr, pgoff, len, (int) val);
4364 } else {
4365 val = _get_svm_area(private, entry, addr, len, flags);
4366 if (IS_ERR_VALUE(val))
4367 KGSL_MEM_ERR(device,
Hareesh Gunduca522a12017-02-15 16:02:06 +05304368 "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n",
4369 private->pid, current->mm->mmap_base, addr,
4370 pgoff, len, (int) val);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004371 }
4372
4373put:
4374 kgsl_mem_entry_put(entry);
4375 return val;
4376}
4377
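/*
 * kgsl_mmap - Map a GPU memory entry (or the memstore) into userspace,
 * applying the entry's caching policy and pre-faulting cached mappings.
 *
 * Userspace sketch (assumption: the mmap offset is the object id shifted by
 * PAGE_SHIFT, matching the lookup in get_mmap_entry() above):
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, (off_t)id << PAGE_SHIFT);
 */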
4378static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
4379{
4380 unsigned int ret, cache;
4381 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
4382 struct kgsl_device_private *dev_priv = file->private_data;
4383 struct kgsl_process_private *private = dev_priv->process_priv;
4384 struct kgsl_mem_entry *entry = NULL;
4385 struct kgsl_device *device = dev_priv->device;
4386
4387	/* Handle legacy behavior for memstore */
4388
4389 if (vma_offset == (unsigned long) device->memstore.gpuaddr)
4390 return kgsl_mmap_memstore(device, vma);
4391
4392 /*
4393 * The reference count on the entry that we get from
4394 * get_mmap_entry() will be held until kgsl_gpumem_vm_close().
4395 */
4396 ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
4397 vma->vm_end - vma->vm_start);
4398 if (ret)
4399 return ret;
4400
4401 vma->vm_flags |= entry->memdesc.ops->vmflags;
4402
4403 vma->vm_private_data = entry;
4404
4405 /* Determine user-side caching policy */
4406
4407 cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
4408
4409 switch (cache) {
4410 case KGSL_CACHEMODE_UNCACHED:
4411 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
4412 break;
4413 case KGSL_CACHEMODE_WRITETHROUGH:
4414 vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
4415 if (pgprot_val(vma->vm_page_prot) ==
4416 pgprot_val(pgprot_writebackcache(vma->vm_page_prot)))
4417 WARN_ONCE(1, "WRITETHROUGH is deprecated for arm64");
4418 break;
4419 case KGSL_CACHEMODE_WRITEBACK:
4420 vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
4421 break;
4422 case KGSL_CACHEMODE_WRITECOMBINE:
4423 default:
4424 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
4425 break;
4426 }
4427
4428 vma->vm_ops = &kgsl_gpumem_vm_ops;
4429
4430 if (cache == KGSL_CACHEMODE_WRITEBACK
4431 || cache == KGSL_CACHEMODE_WRITETHROUGH) {
4432 int i;
4433 unsigned long addr = vma->vm_start;
4434 struct kgsl_memdesc *m = &entry->memdesc;
4435
4436 for (i = 0; i < m->page_count; i++) {
4437 struct page *page = m->pages[i];
4438
4439 vm_insert_page(vma, addr, page);
4440 addr += PAGE_SIZE;
4441 }
4442 }
4443
4444 vma->vm_file = file;
4445
4446 entry->memdesc.useraddr = vma->vm_start;
4447
4448 trace_kgsl_mem_mmap(entry);
4449 return 0;
4450}
4451
4452static irqreturn_t kgsl_irq_handler(int irq, void *data)
4453{
4454 struct kgsl_device *device = data;
4455
4456 return device->ftbl->irq_handler(device);
4457
4458}
4459
4460#define KGSL_READ_MESSAGE "OH HAI GPU\n"
4461
4462static ssize_t kgsl_read(struct file *filep, char __user *buf, size_t count,
4463 loff_t *pos)
4464{
4465 return simple_read_from_buffer(buf, count, pos,
4466 KGSL_READ_MESSAGE, strlen(KGSL_READ_MESSAGE) + 1);
4467}
4468
4469static const struct file_operations kgsl_fops = {
4470 .owner = THIS_MODULE,
4471 .release = kgsl_release,
4472 .open = kgsl_open,
4473 .mmap = kgsl_mmap,
4474 .read = kgsl_read,
4475 .get_unmapped_area = kgsl_get_unmapped_area,
4476 .unlocked_ioctl = kgsl_ioctl,
4477 .compat_ioctl = kgsl_compat_ioctl,
4478};
4479
4480struct kgsl_driver kgsl_driver = {
4481 .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
4482 .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
4483 .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
4484 /*
4485	 * Full cache flushes are faster than line-by-line flushes on at
4486	 * least 8064 and 8974 once the region to be flushed is > 16 MB.
4487 */
4488 .full_cache_threshold = SZ_16M,
4489
4490 .stats.vmalloc = ATOMIC_LONG_INIT(0),
4491 .stats.vmalloc_max = ATOMIC_LONG_INIT(0),
4492 .stats.page_alloc = ATOMIC_LONG_INIT(0),
4493 .stats.page_alloc_max = ATOMIC_LONG_INIT(0),
4494 .stats.coherent = ATOMIC_LONG_INIT(0),
4495 .stats.coherent_max = ATOMIC_LONG_INIT(0),
4496 .stats.secure = ATOMIC_LONG_INIT(0),
4497 .stats.secure_max = ATOMIC_LONG_INIT(0),
4498 .stats.mapped = ATOMIC_LONG_INIT(0),
4499 .stats.mapped_max = ATOMIC_LONG_INIT(0),
4500};
4501EXPORT_SYMBOL(kgsl_driver);
4502
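/* Destroy the character device node and release the minor for this device */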
4503static void _unregister_device(struct kgsl_device *device)
4504{
4505 int minor;
4506
4507 mutex_lock(&kgsl_driver.devlock);
4508 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
4509 if (device == kgsl_driver.devp[minor])
4510 break;
4511 }
4512 if (minor != KGSL_DEVICE_MAX) {
4513 device_destroy(kgsl_driver.class,
4514 MKDEV(MAJOR(kgsl_driver.major), minor));
4515 kgsl_driver.devp[minor] = NULL;
4516 }
4517 mutex_unlock(&kgsl_driver.devlock);
4518}
4519
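/* Reserve a minor number and create the character device node for a device */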
4520static int _register_device(struct kgsl_device *device)
4521{
4522 int minor, ret;
4523 dev_t dev;
4524
4525 /* Find a minor for the device */
4526
4527 mutex_lock(&kgsl_driver.devlock);
4528 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
4529 if (kgsl_driver.devp[minor] == NULL) {
4530 kgsl_driver.devp[minor] = device;
4531 break;
4532 }
4533 }
4534 mutex_unlock(&kgsl_driver.devlock);
4535
4536 if (minor == KGSL_DEVICE_MAX) {
4537 KGSL_CORE_ERR("minor devices exhausted\n");
4538 return -ENODEV;
4539 }
4540
4541 /* Create the device */
4542 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
4543 device->dev = device_create(kgsl_driver.class,
4544 &device->pdev->dev,
4545 dev, device,
4546 device->name);
4547
4548 if (IS_ERR(device->dev)) {
4549 mutex_lock(&kgsl_driver.devlock);
4550 kgsl_driver.devp[minor] = NULL;
4551 mutex_unlock(&kgsl_driver.devlock);
4552 ret = PTR_ERR(device->dev);
4553 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
4554 return ret;
4555 }
4556
4557 dev_set_drvdata(&device->pdev->dev, device);
4558 return 0;
4559}
4560
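/**
 * kgsl_device_platform_probe - Common probe path for a KGSL device
 * @device - pointer to the KGSL device to probe
 *
 * Registers the character device, maps the register space, requests the IRQ,
 * probes the MMU, allocates the global memstore and scratch buffers and sets
 * up PM QoS, snapshot and sysfs support.
 */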
4561int kgsl_device_platform_probe(struct kgsl_device *device)
4562{
4563 int status = -EINVAL;
4564 struct resource *res;
4565 int cpu;
4566
4567 status = _register_device(device);
4568 if (status)
4569 return status;
4570
4571 /* Initialize logging first, so that failures below actually print. */
4572 kgsl_device_debugfs_init(device);
4573
4574 status = kgsl_pwrctrl_init(device);
4575 if (status)
4576 goto error;
4577
Shrenuj Bansala419c792016-10-20 14:05:11 -07004578 /*
4579 * Check if a shadermemname is defined, and then get shader memory
4580 * details including shader memory starting physical address
4581 * and shader memory length
4582 */
4583 if (device->shadermemname != NULL) {
4584 res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
4585 device->shadermemname);
4586
4587 if (res == NULL) {
4588 KGSL_DRV_WARN(device,
4589 "Shader memory: platform_get_resource_byname failed\n");
4590 }
4591
4592 else {
4593 device->shader_mem_phys = res->start;
4594 device->shader_mem_len = resource_size(res);
4595 }
4596
4597 if (!devm_request_mem_region(device->dev,
4598 device->shader_mem_phys,
4599 device->shader_mem_len,
4600 device->name)) {
4601			KGSL_DRV_WARN(device, "request_mem_region failed\n");
4602 }
4603 }
4604
4605 if (!devm_request_mem_region(device->dev, device->reg_phys,
4606 device->reg_len, device->name)) {
4607 KGSL_DRV_ERR(device, "request_mem_region failed\n");
4608 status = -ENODEV;
4609 goto error_pwrctrl_close;
4610 }
4611
4612 device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
4613 device->reg_len);
4614
4615 if (device->reg_virt == NULL) {
4616 KGSL_DRV_ERR(device, "ioremap failed\n");
4617 status = -ENODEV;
4618 goto error_pwrctrl_close;
4619 }
4620	/* Acquire interrupt */
4621 device->pwrctrl.interrupt_num =
4622 platform_get_irq_byname(device->pdev, device->pwrctrl.irq_name);
4623
4624 if (device->pwrctrl.interrupt_num <= 0) {
4625 KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
4626 device->pwrctrl.interrupt_num);
4627 status = -EINVAL;
4628 goto error_pwrctrl_close;
4629 }
4630
4631 status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
4632 kgsl_irq_handler, IRQF_TRIGGER_HIGH,
4633 device->name, device);
4634 if (status) {
4635 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
4636 device->pwrctrl.interrupt_num, status);
4637 goto error_pwrctrl_close;
4638 }
4639 disable_irq(device->pwrctrl.interrupt_num);
4640
4641 KGSL_DRV_INFO(device,
4642 "dev_id %d regs phys 0x%08lx size 0x%08x\n",
4643 device->id, device->reg_phys, device->reg_len);
4644
4645 rwlock_init(&device->context_lock);
4646
4647 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
4648
4649 status = kgsl_mmu_probe(device, kgsl_mmu_type);
4650 if (status != 0)
4651 goto error_pwrctrl_close;
4652
4653 /* Check to see if our device can perform DMA correctly */
4654 status = dma_set_coherent_mask(&device->pdev->dev, KGSL_DMA_BIT_MASK);
4655 if (status)
4656 goto error_close_mmu;
4657
4658 /* Initialize the memory pools */
4659 kgsl_init_page_pools(device->pdev);
4660
4661 status = kgsl_allocate_global(device, &device->memstore,
4662 KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
4663
4664 if (status != 0)
4665 goto error_close_mmu;
4666
4667 status = kgsl_allocate_global(device, &device->scratch,
4668 PAGE_SIZE, 0, 0, "scratch");
4669 if (status != 0)
4670 goto error_free_memstore;
4671
4672 /*
4673	 * The default request type PM_QOS_REQ_ALL_CORES is
4674	 * applicable to all online CPU cores and therefore has
4675	 * a larger power impact as the number of CPUs grows.
4676	 * The PM_QOS_REQ_AFFINE_IRQ request type applies the
4677	 * vote only to the CPU to which the IRQ's affinity
4678	 * is set.
4679 */
4680#ifdef CONFIG_SMP
4681
4682 device->pwrctrl.pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
4683 device->pwrctrl.pm_qos_req_dma.irq = device->pwrctrl.interrupt_num;
4684
4685#endif
4686 pm_qos_add_request(&device->pwrctrl.pm_qos_req_dma,
4687 PM_QOS_CPU_DMA_LATENCY,
4688 PM_QOS_DEFAULT_VALUE);
4689
4690 if (device->pwrctrl.l2pc_cpus_mask) {
4691
4692 device->pwrctrl.l2pc_cpus_qos.type =
4693 PM_QOS_REQ_AFFINE_CORES;
4694		cpumask_clear(&device->pwrctrl.l2pc_cpus_qos.cpus_affine);
4695 for_each_possible_cpu(cpu) {
4696 if ((1 << cpu) & device->pwrctrl.l2pc_cpus_mask)
4697 cpumask_set_cpu(cpu, &device->pwrctrl.
4698 l2pc_cpus_qos.cpus_affine);
4699 }
4700
4701 pm_qos_add_request(&device->pwrctrl.l2pc_cpus_qos,
4702 PM_QOS_CPU_DMA_LATENCY,
4703 PM_QOS_DEFAULT_VALUE);
4704 }
4705
4706 device->events_wq = alloc_workqueue("kgsl-events",
4707 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4708
4709 /* Initialize the snapshot engine */
4710 kgsl_device_snapshot_init(device);
4711
4712 /* Initialize common sysfs entries */
4713 kgsl_pwrctrl_init_sysfs(device);
4714
4715 return 0;
4716
4717error_free_memstore:
4718 kgsl_free_global(device, &device->memstore);
4719error_close_mmu:
4720 kgsl_mmu_close(device);
4721error_pwrctrl_close:
4722 kgsl_pwrctrl_close(device);
4723error:
Lynus Vaz519dacfd2017-02-14 12:17:37 +05304724 kgsl_device_debugfs_close(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004725 _unregister_device(device);
4726 return status;
4727}
4728EXPORT_SYMBOL(kgsl_device_platform_probe);
4729
4730void kgsl_device_platform_remove(struct kgsl_device *device)
4731{
4732 destroy_workqueue(device->events_wq);
4733
4734 kgsl_device_snapshot_close(device);
4735
4736 kgsl_exit_page_pools();
4737
4738 kgsl_pwrctrl_uninit_sysfs(device);
4739
4740 pm_qos_remove_request(&device->pwrctrl.pm_qos_req_dma);
4741 if (device->pwrctrl.l2pc_cpus_mask)
4742 pm_qos_remove_request(&device->pwrctrl.l2pc_cpus_qos);
4743
4744 idr_destroy(&device->context_idr);
4745
4746 kgsl_free_global(device, &device->scratch);
4747
4748 kgsl_free_global(device, &device->memstore);
4749
4750 kgsl_mmu_close(device);
4751
4752 kgsl_pwrctrl_close(device);
4753
Lynus Vaz519dacfd2017-02-14 12:17:37 +05304754 kgsl_device_debugfs_close(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004755 _unregister_device(device);
4756}
4757EXPORT_SYMBOL(kgsl_device_platform_remove);
4758
4759static void kgsl_core_exit(void)
4760{
4761 kgsl_events_exit();
4762 kgsl_core_debugfs_close();
4763
4764 /*
4765 * We call kgsl_sharedmem_uninit_sysfs() and device_unregister()
4766 * only if kgsl_driver.virtdev has been populated.
4767 * We check at least one member of kgsl_driver.virtdev to
4768 * see if it is not NULL (and thus, has been populated).
4769 */
4770 if (kgsl_driver.virtdev.class) {
4771 kgsl_sharedmem_uninit_sysfs();
4772 device_unregister(&kgsl_driver.virtdev);
4773 }
4774
4775 if (kgsl_driver.class) {
4776 class_destroy(kgsl_driver.class);
4777 kgsl_driver.class = NULL;
4778 }
4779
Tarun Karra2b8b3632016-11-14 16:38:27 -08004780 kgsl_drawobjs_cache_exit();
Shrenuj Bansala419c792016-10-20 14:05:11 -07004781
4782 kgsl_memfree_exit();
4783 unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
4784}
4785
4786static int __init kgsl_core_init(void)
4787{
4788 int result = 0;
4789 /* alloc major and minor device numbers */
4790 result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
4791 "kgsl");
4792
4793 if (result < 0) {
4794
4795 KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
4796 goto err;
4797 }
4798
4799 cdev_init(&kgsl_driver.cdev, &kgsl_fops);
4800 kgsl_driver.cdev.owner = THIS_MODULE;
4801 kgsl_driver.cdev.ops = &kgsl_fops;
4802 result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
4803 KGSL_DEVICE_MAX);
4804
4805 if (result) {
4806 KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d, result= %d\n",
4807 kgsl_driver.major, result);
4808 goto err;
4809 }
4810
4811 kgsl_driver.class = class_create(THIS_MODULE, "kgsl");
4812
4813 if (IS_ERR(kgsl_driver.class)) {
4814 result = PTR_ERR(kgsl_driver.class);
4815		KGSL_CORE_ERR("failed to create class for kgsl\n");
4816 goto err;
4817 }
4818
4819 /*
4820 * Make a virtual device for managing core related things
4821 * in sysfs
4822 */
4823 kgsl_driver.virtdev.class = kgsl_driver.class;
4824 dev_set_name(&kgsl_driver.virtdev, "kgsl");
4825 result = device_register(&kgsl_driver.virtdev);
4826 if (result) {
4827		KGSL_CORE_ERR("device_register failed\n");
4828 goto err;
4829 }
4830
4831 /* Make kobjects in the virtual device for storing statistics */
4832
4833 kgsl_driver.ptkobj =
4834 kobject_create_and_add("pagetables",
4835 &kgsl_driver.virtdev.kobj);
4836
4837 kgsl_driver.prockobj =
4838 kobject_create_and_add("proc",
4839 &kgsl_driver.virtdev.kobj);
4840
4841 kgsl_core_debugfs_init();
4842
4843 kgsl_sharedmem_init_sysfs();
4844
4845 INIT_LIST_HEAD(&kgsl_driver.process_list);
4846
4847 INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
4848
4849 kgsl_driver.workqueue = alloc_workqueue("kgsl-workqueue",
4850 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4851
4852 kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
Hareesh Gundu615439d2017-06-16 17:06:57 +05304853 WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
Shrenuj Bansala419c792016-10-20 14:05:11 -07004854
4855 kgsl_events_init();
4856
Tarun Karra2b8b3632016-11-14 16:38:27 -08004857 result = kgsl_drawobjs_cache_init();
Shrenuj Bansala419c792016-10-20 14:05:11 -07004858 if (result)
4859 goto err;
4860
4861 kgsl_memfree_init();
4862
4863 return 0;
4864
4865err:
4866 kgsl_core_exit();
4867 return result;
4868}
4869
4870module_init(kgsl_core_init);
4871module_exit(kgsl_core_exit);
4872
4873MODULE_DESCRIPTION("MSM GPU driver");
4874MODULE_LICENSE("GPL");