/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_H
#define __KGSL_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/msm_kgsl.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/regulator/consumer.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <asm/cacheflush.h>
#include <linux/compat.h>

/*
 * --- kgsl drawobj flags ---
 * These flags are the same as the --- cmdbatch flags ---
 * but renamed to reflect that cmdbatch has been renamed to drawobj.
 */
#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE

#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer

/* The number of memstore arrays limits the number of contexts allowed.
 * If more contexts are needed, update the page multiplier in
 * KGSL_MEMSTORE_SIZE.
 */
#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 8))
#define KGSL_MEMSTORE_GLOBAL (0)
#define KGSL_PRIORITY_MAX_RB_LEVELS 4
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
	sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)
#define KGSL_MAX_CONTEXTS_PER_PROC 200

#define MEMSTORE_RB_OFFSET(rb, field) \
	KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)

#define MEMSTORE_ID_GPU_ADDR(dev, iter, field) \
	((dev)->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(iter, field))

#define MEMSTORE_RB_GPU_ADDR(dev, rb, field) \
	((dev)->memstore.gpuaddr + \
	 KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field))

/*
 * SCRATCH MEMORY: The scratch memory is one page worth of data that
 * is mapped into the GPU. This allows for some 'shared' data between
 * the GPU and CPU. For example, it will be used by the GPU to write
 * each updated RPTR for each RB.
 *
 * Used Data:
 * Offset: Length(bytes): What
 * 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 RPTR
 * 0x10: 8 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 CTXT RESTORE ADDR
 */

/* Shadow global helpers */
#define SCRATCH_RPTR_OFFSET(id) ((id) * sizeof(unsigned int))
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
	((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))

#define SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id) \
	(SCRATCH_RPTR_OFFSET(KGSL_PRIORITY_MAX_RB_LEVELS) + \
	((id) * sizeof(uint64_t)))
#define SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(dev, id) \
	((dev)->scratch.gpuaddr + \
	SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id))

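/*
 * Illustrative example (not part of the API): with
 * KGSL_PRIORITY_MAX_RB_LEVELS == 4, SCRATCH_RPTR_OFFSET(2) is
 * 2 * sizeof(unsigned int) == 8, so the RPTR shadow for ringbuffer 2 lives
 * at scratch.gpuaddr + 8.  SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(2)
 * is 4 * sizeof(unsigned int) + 2 * sizeof(uint64_t) == 32, i.e. the 64-bit
 * context restore addresses start at 0x10, immediately after the four
 * 32-bit RPTR slots described in the layout above.
 */
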
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000

/*
 * A helper for memory statistics - add the new size to the stat and, if
 * the updated statistic is greater than *max, update *max.
 */
static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
		atomic_long_t *max)
{
	uint64_t ret = atomic_long_add_return(size, stat);

	if (ret > atomic_long_read(max))
		atomic_long_set(max, ret);
}

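/*
 * Illustrative example (not part of the API): an allocation path would
 * typically update the driver-wide statistics with something like
 *
 *	KGSL_STATS_ADD(size, &kgsl_driver.stats.page_alloc,
 *			&kgsl_driver.stats.page_alloc_max);
 *
 * which bumps the running total and records a new high-water mark whenever
 * the total exceeds the previous maximum.
 */
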
#define KGSL_MAX_NUMIBS 100000
#define KGSL_MAX_SYNCPOINTS 32
#define KGSL_MAX_SPARSE 1000

struct kgsl_device;
struct kgsl_context;

/**
 * struct kgsl_driver - main container for global KGSL things
 * @cdev: Character device struct
 * @major: Major ID for the KGSL device
 * @class: Pointer to the class struct for the core KGSL sysfs entries
 * @virtdev: Virtual device for managing the core
 * @ptkobj: kobject for storing the pagetable statistics
 * @prockobj: kobject for storing the process statistics
 * @devp: Array of pointers to the individual KGSL device structs
 * @process_list: List of open processes
 * @pagetable_list: List of open pagetables
 * @ptlock: Lock for accessing the pagetable list
 * @process_mutex: Mutex for accessing the process list
 * @devlock: Mutex protecting the device list
 * @stats: Struct containing atomic memory statistics
 * @full_cache_threshold: the threshold that triggers a full cache flush
 * @workqueue: Pointer to a single threaded workqueue
 * @mem_workqueue: Pointer to a workqueue for deferring memory entries
 * @worker: kthread_worker used by the driver for deferred work
 * @worker_thread: Task backing @worker
 */
struct kgsl_driver {
	struct cdev cdev;
	dev_t major;
	struct class *class;
	struct device virtdev;
	struct kobject *ptkobj;
	struct kobject *prockobj;
	struct kgsl_device *devp[KGSL_DEVICE_MAX];
	struct list_head process_list;
	struct list_head pagetable_list;
	spinlock_t ptlock;
	struct mutex process_mutex;
	struct mutex devlock;
	struct {
		atomic_long_t vmalloc;
		atomic_long_t vmalloc_max;
		atomic_long_t page_alloc;
		atomic_long_t page_alloc_max;
		atomic_long_t coherent;
		atomic_long_t coherent_max;
		atomic_long_t secure;
		atomic_long_t secure_max;
		atomic_long_t mapped;
		atomic_long_t mapped_max;
	} stats;
	unsigned int full_cache_threshold;
	struct workqueue_struct *workqueue;
	struct workqueue_struct *mem_workqueue;
	struct kthread_worker worker;
	struct task_struct *worker_thread;
};

extern struct kgsl_driver kgsl_driver;
extern struct mutex kgsl_mmu_sync;

struct kgsl_pagetable;
struct kgsl_memdesc;

struct kgsl_memdesc_ops {
	unsigned int vmflags;
	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
		struct vm_fault *);
	void (*free)(struct kgsl_memdesc *memdesc);
	int (*map_kernel)(struct kgsl_memdesc *);
	void (*unmap_kernel)(struct kgsl_memdesc *);
};

/* Internal definitions for memdesc->priv */
#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
/* Set if the memdesc is mapped into all pagetables */
#define KGSL_MEMDESC_GLOBAL BIT(1)
/* The memdesc is frozen during a snapshot */
#define KGSL_MEMDESC_FROZEN BIT(2)
/* The memdesc is mapped into a pagetable */
#define KGSL_MEMDESC_MAPPED BIT(3)
/* The memdesc is secured for content protection */
#define KGSL_MEMDESC_SECURE BIT(4)
/* Memory is accessible in privileged mode */
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* The memdesc is TZ locked content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(7)
/* The memdesc is allocated through contiguous memory */
#define KGSL_MEMDESC_CONTIG BIT(8)
/* For global buffers, randomly assign an address from the region */
#define KGSL_MEMDESC_RANDOM BIT(9)

/**
 * struct kgsl_memdesc - GPU memory object descriptor
 * @pagetable: Pointer to the pagetable that the object is mapped in
 * @hostptr: Kernel virtual address
 * @hostptr_count: Number of threads using hostptr
 * @gpuaddr: GPU virtual address
 * @physaddr: Physical address of the memory object
 * @size: Size of the memory object
 * @priv: Internal flags and settings
 * @sgt: Scatter gather table for allocated pages
 * @ops: Function hooks for the memdesc memory type
 * @flags: Flags set from userspace
 * @dev: Pointer to the struct device that owns this memory
 * @attrs: dma attributes for this memory
 * @pages: An array of pointers to allocated pages
 * @page_count: Total number of pages allocated
 * @cur_bindings: Number of sparse pages actively bound
 */
struct kgsl_memdesc {
	struct kgsl_pagetable *pagetable;
	void *hostptr;
	unsigned int hostptr_count;
	uint64_t gpuaddr;
	phys_addr_t physaddr;
	uint64_t size;
	unsigned int priv;
	struct sg_table *sgt;
	struct kgsl_memdesc_ops *ops;
	uint64_t flags;
	struct device *dev;
	unsigned long attrs;
	struct page **pages;
	unsigned int page_count;
	unsigned int cur_bindings;
	/*
	 * @lock: Spinlock to protect the gpuaddr from being accessed by
	 * multiple entities trying to map the same SVM region at once
	 */
	spinlock_t lock;
};

/*
 * List of different memory entry types. The usermem enum
 * starts at 0, which we use for allocated memory, so 1 is
 * added to the enum values.
 */
#define KGSL_MEM_ENTRY_KERNEL 0
#define KGSL_MEM_ENTRY_USER (KGSL_USER_MEM_TYPE_ADDR + 1)
#define KGSL_MEM_ENTRY_ION (KGSL_USER_MEM_TYPE_ION + 1)
#define KGSL_MEM_ENTRY_MAX (KGSL_USER_MEM_TYPE_MAX + 1)

/* symbolic table for trace and debugfs */
#define KGSL_MEM_TYPES \
	{ KGSL_MEM_ENTRY_KERNEL, "gpumem" }, \
	{ KGSL_MEM_ENTRY_USER, "usermem" }, \
	{ KGSL_MEM_ENTRY_ION, "ion" }

/*
 * struct kgsl_mem_entry - a userspace memory allocation
 * @refcount: reference count. Currently userspace can only
 * hold a single reference count, but the kernel may hold more.
 * @memdesc: description of the memory
 * @priv_data: type-specific data, such as the dma-buf attachment pointer.
 * @node: rb_node for the gpu address lookup rb tree
 * @id: idr index for this entry, can be used to find memory that does not have
 * a valid GPU address.
 * @priv: back pointer to the process that owns this memory
 * @pending_free: if !0, userspace requested that this memory be freed, but
 * there are still references to it.
 * @dev_priv: back pointer to the device file that created this entry.
 * @metadata: String containing user specified metadata for the entry
 * @work: Work struct used to schedule a kgsl_mem_entry_put in atomic contexts
 * @bind_lock: Lock for sparse memory bindings
 * @bind_tree: RB Tree for sparse memory bindings
 */
struct kgsl_mem_entry {
	struct kref refcount;
	struct kgsl_memdesc memdesc;
	void *priv_data;
	struct rb_node node;
	unsigned int id;
	struct kgsl_process_private *priv;
	int pending_free;
	char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
	struct work_struct work;
	spinlock_t bind_lock;
	struct rb_root bind_tree;
	/*
	 * @map_count: Count how many vmas this object is mapped in - used for
	 * debugfs accounting
	 */
	atomic_t map_count;
};

struct kgsl_device_private;
struct kgsl_event_group;

typedef void (*kgsl_event_func)(struct kgsl_device *, struct kgsl_event_group *,
		void *, int);

/**
 * struct kgsl_event - KGSL GPU timestamp event
 * @device: Pointer to the KGSL device that owns the event
 * @context: Pointer to the context that owns the event
 * @timestamp: Timestamp for the event to expire
 * @func: Callback function for the event when it expires
 * @priv: Private data passed to the callback function
 * @node: List node for the kgsl_event_group list
 * @created: Jiffies when the event was created
 * @work: Work struct for dispatching the callback
 * @result: KGSL event result type to pass to the callback
 * @group: The event group this event belongs to
 */
struct kgsl_event {
	struct kgsl_device *device;
	struct kgsl_context *context;
	unsigned int timestamp;
	kgsl_event_func func;
	void *priv;
	struct list_head node;
	unsigned int created;
	struct work_struct work;
	int result;
	struct kgsl_event_group *group;
};

typedef int (*readtimestamp_func)(struct kgsl_device *, void *,
		enum kgsl_timestamp_type, unsigned int *);

/**
 * struct kgsl_event_group - A list of GPU events
 * @context: Pointer to the active context for the events
 * @lock: Spinlock for protecting the list
 * @events: List of active GPU events
 * @group: Node for the master group list
 * @processed: Last processed timestamp
 * @name: String name for the group (for the debugfs file)
 * @readtimestamp: Function pointer to read a timestamp
 * @priv: Priv member to pass to the readtimestamp function
 */
struct kgsl_event_group {
	struct kgsl_context *context;
	spinlock_t lock;
	struct list_head events;
	struct list_head group;
	unsigned int processed;
	char name[64];
	readtimestamp_func readtimestamp;
	void *priv;
};

/**
 * struct kgsl_protected_registers - Protected register range
 * @base: Offset of the range to be protected
 * @range: Range (# of registers = 2 ** range)
 */
struct kgsl_protected_registers {
	unsigned int base;
	int range;
};

/**
 * struct sparse_bind_object - Bind metadata
 * @node: Node for the rb tree
 * @p_memdesc: Physical memdesc bound to
 * @v_off: Offset of bind in the virtual entry
 * @p_off: Offset of bind in the physical memdesc
 * @size: Size of the bind
 * @flags: Flags for the bind
 */
struct sparse_bind_object {
	struct rb_node node;
	struct kgsl_memdesc *p_memdesc;
	uint64_t v_off;
	uint64_t p_off;
	uint64_t size;
	uint64_t flags;
};

long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
		*dev_priv, unsigned int cmd, void *data);
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
		*dev_priv, unsigned int cmd,
		void *data);
long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
		struct kgsl_device_private
		*dev_priv, unsigned int cmd,
		void *data);
long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);

long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);

void kgsl_mem_entry_destroy(struct kref *kref);

void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
		int *egl_surface_count, int *egl_image_count);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id);

extern const struct dev_pm_ops kgsl_pm_ops;

int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);

struct kgsl_mem_entry *gpumem_alloc_entry(struct kgsl_device_private *dev_priv,
		uint64_t size, uint64_t flags);
long gpumem_free_entry(struct kgsl_mem_entry *entry);

static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
		uint64_t gpuaddr, uint64_t size)
{
	/* set a minimum size to search for */
	if (!size)
		size = 1;

	/* don't overflow */
	if (size > U64_MAX - gpuaddr)
		return 0;

	if (gpuaddr >= memdesc->gpuaddr &&
	    ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) {
		return 1;
	}
	return 0;
}

static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
{
	if (memdesc->ops && memdesc->ops->map_kernel)
		memdesc->ops->map_kernel(memdesc);

	return memdesc->hostptr;
}

static inline void kgsl_memdesc_unmap(struct kgsl_memdesc *memdesc)
{
	if (memdesc->ops && memdesc->ops->unmap_kernel)
		memdesc->ops->unmap_kernel(memdesc);
}

static inline void *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
		uint64_t gpuaddr)
{
	void *hostptr = NULL;

	if ((gpuaddr >= memdesc->gpuaddr) &&
		(gpuaddr < (memdesc->gpuaddr + memdesc->size)))
		hostptr = kgsl_memdesc_map(memdesc);

	return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL;
}

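/*
 * Illustrative example (not part of the API): CPU access to a GPU buffer is
 * bracketed by the map/unmap helpers, e.g.
 *
 *	u32 *ptr = kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
 *
 *	if (ptr) {
 *		*ptr = value;
 *		kgsl_memdesc_unmap(&entry->memdesc);
 *	}
 *
 * kgsl_gpuaddr_to_vaddr() maps the buffer into the kernel (when the memdesc
 * provides a map_kernel() hook) and converts the GPU address into the
 * matching hostptr offset; the caller drops the kernel mapping when done.
 */
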
static inline int timestamp_cmp(unsigned int a, unsigned int b)
{
	/* check for equal */
	if (a == b)
		return 0;

	/* check for greater-than for non-rollover case */
	if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW))
		return 1;

	/* check for greater-than for rollover case
	 * note that <= is required to ensure that consistent
	 * results are returned for values whose difference is
	 * equal to the window size
	 */
	a += KGSL_TIMESTAMP_WINDOW;
	b += KGSL_TIMESTAMP_WINDOW;
	return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
}

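/*
 * Illustrative example (not part of the API): timestamp_cmp() orders
 * timestamps modulo 2^32, so a small value that follows a rollover still
 * compares as newer:
 *
 *	timestamp_cmp(0x00000005, 0x00000001) ==  1
 *	timestamp_cmp(0x00000001, 0xfffffffe) ==  1   (rollover: 1 is newer)
 *	timestamp_cmp(0xfffffffe, 0x00000001) == -1
 *	timestamp_cmp(7, 7)                   ==  0
 */
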
/**
 * kgsl_schedule_work() - Schedule a work item on the KGSL workqueue
 * @work: work item to schedule
 */
static inline void kgsl_schedule_work(struct work_struct *work)
{
	queue_work(kgsl_driver.workqueue, work);
}

static inline int
kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
{
	if (entry)
		return kref_get_unless_zero(&entry->refcount);
	return 0;
}

static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
	if (entry)
		kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}

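/*
 * Illustrative example (not part of the API): lookups pair a reference
 * acquisition with kgsl_mem_entry_put() so the entry cannot be destroyed
 * while in use, e.g.
 *
 *	struct kgsl_mem_entry *entry =
 *		kgsl_sharedmem_find(private, gpuaddr);
 *
 *	if (entry) {
 *		use_the_entry(entry);           (hypothetical helper)
 *		kgsl_mem_entry_put(entry);
 *	}
 *
 * kgsl_sharedmem_find() is assumed to return its result with a reference
 * already held; kgsl_mem_entry_put() releases it and the entry is freed via
 * kgsl_mem_entry_destroy() when the last reference drops.
 */
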
/*
 * kgsl_addr_range_overlap() - Checks if 2 ranges overlap
 * @gpuaddr1: Start of first address range
 * @size1: Size of first address range
 * @gpuaddr2: Start of second address range
 * @size2: Size of second address range
 *
 * Function returns true if the 2 given address ranges overlap
 * else false
 */
static inline bool kgsl_addr_range_overlap(uint64_t gpuaddr1,
		uint64_t size1, uint64_t gpuaddr2, uint64_t size2)
{
	if ((size1 > (U64_MAX - gpuaddr1)) || (size2 > (U64_MAX - gpuaddr2)))
		return false;
	return !(((gpuaddr1 + size1) <= gpuaddr2) ||
		(gpuaddr1 >= (gpuaddr2 + size2)));
}

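/*
 * Illustrative example (not part of the API):
 *
 *	kgsl_addr_range_overlap(0x1000, 0x1000, 0x1800, 0x1000)  -> true
 *	kgsl_addr_range_overlap(0x1000, 0x1000, 0x2000, 0x1000)  -> false
 *
 * Ranges are treated as half-open ([start, start + size)), so two ranges
 * that merely touch end-to-start do not overlap, and any range whose size
 * would wrap past U64_MAX is treated as non-overlapping.
 */
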
/**
 * kgsl_malloc() - Use either kzalloc or vmalloc to allocate memory
 * @size: Size of the desired allocation
 *
 * Allocate a block of memory for the driver - if it is small, try to
 * allocate it with kzalloc (fast!), otherwise fall back to vmalloc (safe!).
 */
static inline void *kgsl_malloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);

	return vmalloc(size);
}

/**
 * kgsl_free() - Free memory allocated by kgsl_malloc()
 * @ptr: Pointer to the memory to free
 *
 * Free the memory, whether it lives in vmalloc or kmalloc space
 */
static inline void kgsl_free(void *ptr)
{
	if (ptr != NULL && is_vmalloc_addr(ptr))
		return vfree(ptr);

	kfree(ptr);
}

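/*
 * Illustrative example (not part of the API): kgsl_malloc()/kgsl_free() suit
 * allocations whose size is driven by userspace and may exceed what kmalloc
 * handles comfortably, e.g. a variable-length array:
 *
 *	uint64_t *ids = kgsl_malloc(count * sizeof(*ids));
 *
 *	if (ids == NULL)
 *		return -ENOMEM;
 *	...
 *	kgsl_free(ids);
 *
 * Note that allocations up to PAGE_SIZE come zeroed from kzalloc() while
 * larger ones come from vmalloc() (which does not zero-initialize), and
 * kgsl_free() picks vfree() or kfree() based on is_vmalloc_addr().
 */
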
static inline int _copy_from_user(void *dest, void __user *src,
		unsigned int ksize, unsigned int usize)
{
	unsigned int copy = ksize < usize ? ksize : usize;

	if (copy == 0)
		return -EINVAL;

	return copy_from_user(dest, src, copy) ? -EFAULT : 0;
}

static inline void __user *to_user_ptr(uint64_t address)
{
	return (void __user *)(uintptr_t)address;
}

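/*
 * Illustrative example (not part of the API): _copy_from_user() copies
 * min(ksize, usize) bytes so that userspace structures that are smaller or
 * larger than the kernel's definition can still be accepted, e.g.
 *
 *	struct kgsl_gpuobj_alloc param = { 0 };
 *
 *	ret = _copy_from_user(&param, to_user_ptr(arg), sizeof(param), size);
 *
 * where "arg" and "size" are assumed to come from the ioctl dispatch code,
 * and to_user_ptr() converts the 64-bit user value into a __user pointer.
 */
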
static inline void kgsl_gpu_sysfs_add_link(struct kobject *dst,
		struct kobject *src, const char *src_name,
		const char *dst_name)
{
	struct kernfs_node *old;

	if (dst == NULL || src == NULL)
		return;

	old = sysfs_get_dirent(src->sd, src_name);
	if (IS_ERR_OR_NULL(old))
		return;

	kernfs_create_link(dst->sd, dst_name, old);
}

static inline bool kgsl_is_compat_task(void)
{
	return (BITS_PER_LONG == 32) || is_compat_task();
}
#endif /* __KGSL_H */