/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_H
#define __KGSL_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/msm_kgsl.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/regulator/consumer.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * --- kgsl drawobj flags ---
 * These flags are the same as the --- cmdbatch flags ---, renamed to
 * reflect that "cmdbatch" has been renamed to "drawobj".
 */
#define KGSL_DRAWOBJ_MEMLIST		KGSL_CMDBATCH_MEMLIST
#define KGSL_DRAWOBJ_MARKER		KGSL_CMDBATCH_MARKER
#define KGSL_DRAWOBJ_SUBMIT_IB_LIST	KGSL_CMDBATCH_SUBMIT_IB_LIST
#define KGSL_DRAWOBJ_CTX_SWITCH		KGSL_CMDBATCH_CTX_SWITCH
#define KGSL_DRAWOBJ_PROFILING		KGSL_CMDBATCH_PROFILING
#define KGSL_DRAWOBJ_PROFILING_KTIME	KGSL_CMDBATCH_PROFILING_KTIME
#define KGSL_DRAWOBJ_END_OF_FRAME	KGSL_CMDBATCH_END_OF_FRAME
#define KGSL_DRAWOBJ_SYNC		KGSL_CMDBATCH_SYNC
#define KGSL_DRAWOBJ_PWR_CONSTRAINT	KGSL_CMDBATCH_PWR_CONSTRAINT
#define KGSL_DRAWOBJ_SPARSE		KGSL_CMDBATCH_SPARSE

#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer

/*
 * The number of memstore arrays limits the number of contexts allowed.
 * If more contexts are needed, update the multiplier for
 * KGSL_MEMSTORE_SIZE.
 */
#define KGSL_MEMSTORE_SIZE	((int)(PAGE_SIZE * 2))
#define KGSL_MEMSTORE_GLOBAL	(0)
#define KGSL_PRIORITY_MAX_RB_LEVELS 4
#define KGSL_MEMSTORE_MAX	(KGSL_MEMSTORE_SIZE / \
	sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)

#define MEMSTORE_RB_OFFSET(rb, field) \
	KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)

#define MEMSTORE_ID_GPU_ADDR(dev, iter, field) \
	((dev)->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(iter, field))

#define MEMSTORE_RB_GPU_ADDR(dev, rb, field) \
	((dev)->memstore.gpuaddr + \
	 KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field))
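
/*
 * Illustrative example (editorial, not from the original source):
 * assuming the slot math above, slot 0 (KGSL_MEMSTORE_GLOBAL) holds
 * global state, slots 1..KGSL_MEMSTORE_MAX-1 are indexed by context
 * id, and the final KGSL_PRIORITY_MAX_RB_LEVELS slots shadow the
 * ringbuffers:
 *
 *	gpuaddr = MEMSTORE_ID_GPU_ADDR(device, context->id, eoptimestamp);
 *	rb_addr = MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp);
 *
 * where eoptimestamp is assumed to be a field of struct
 * kgsl_devmemstore.
 */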

/*
 * SCRATCH MEMORY: The scratch memory is one page worth of data that
 * is mapped into the GPU. This allows for some 'shared' data between
 * the GPU and CPU. For example, it will be used by the GPU to write
 * each updated RPTR for each RB.
 *
 * Used Data:
 * Offset:	Length(bytes):				What
 * 0x0:		4 * KGSL_PRIORITY_MAX_RB_LEVELS		RPTR for each RB
 */

/* Shadow global helpers */
#define SCRATCH_RPTR_OFFSET(id) ((id) * sizeof(unsigned int))
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
	((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
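
/*
 * Illustrative example (editorial): the GPU-visible address at which
 * ringbuffer 'rb' publishes its read pointer would be computed as
 *
 *	uint64_t rptr_addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
 *
 * with one 4-byte RPTR slot per ringbuffer starting at offset 0x0.
 */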

/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000

/*
 * A helper for memory statistics - add the new size to the stat and,
 * if the statistic is now greater than *max, update *max.
 */
static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
		atomic_long_t *max)
{
	uint64_t ret = atomic_long_add_return(size, stat);

	if (ret > atomic_long_read(max))
		atomic_long_set(max, ret);
}
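
/*
 * Illustrative usage (editorial): after allocating 'size' bytes of
 * page_alloc memory, a caller could update both the running and the
 * peak totals with
 *
 *	KGSL_STATS_ADD(size, &kgsl_driver.stats.page_alloc,
 *		&kgsl_driver.stats.page_alloc_max);
 */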

#define KGSL_MAX_NUMIBS 100000
#define KGSL_MAX_SYNCPOINTS 32
#define KGSL_MAX_SPARSE 1000

struct kgsl_device;
struct kgsl_context;

/**
 * struct kgsl_driver - main container for global KGSL things
 * @cdev: Character device struct
 * @major: Major ID for the KGSL device
 * @class: Pointer to the class struct for the core KGSL sysfs entries
 * @virtdev: Virtual device for managing the core
 * @ptkobj: kobject for storing the pagetable statistics
 * @prockobj: kobject for storing the process statistics
 * @devp: Array of pointers to the individual KGSL device structs
 * @process_list: List of open processes
 * @pagetable_list: List of open pagetables
 * @ptlock: Lock for accessing the pagetable list
 * @process_mutex: Mutex for accessing the process list
 * @devlock: Mutex protecting the device list
 * @stats: Struct containing atomic memory statistics
 * @full_cache_threshold: the threshold that triggers a full cache flush
 * @workqueue: Pointer to a single threaded workqueue
 * @mem_workqueue: Pointer to a workqueue for deferring memory entries
 */
struct kgsl_driver {
	struct cdev cdev;
	dev_t major;
	struct class *class;
	struct device virtdev;
	struct kobject *ptkobj;
	struct kobject *prockobj;
	struct kgsl_device *devp[KGSL_DEVICE_MAX];
	struct list_head process_list;
	struct list_head pagetable_list;
	spinlock_t ptlock;
	struct mutex process_mutex;
	struct mutex devlock;
	struct {
		atomic_long_t vmalloc;
		atomic_long_t vmalloc_max;
		atomic_long_t page_alloc;
		atomic_long_t page_alloc_max;
		atomic_long_t coherent;
		atomic_long_t coherent_max;
		atomic_long_t secure;
		atomic_long_t secure_max;
		atomic_long_t mapped;
		atomic_long_t mapped_max;
	} stats;
	unsigned int full_cache_threshold;
	struct workqueue_struct *workqueue;
	struct workqueue_struct *mem_workqueue;
};

extern struct kgsl_driver kgsl_driver;
extern struct mutex kgsl_mmu_sync;

struct kgsl_pagetable;
struct kgsl_memdesc;

struct kgsl_memdesc_ops {
	unsigned int vmflags;
	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
		       struct vm_fault *);
	void (*free)(struct kgsl_memdesc *memdesc);
	int (*map_kernel)(struct kgsl_memdesc *);
	void (*unmap_kernel)(struct kgsl_memdesc *);
};

/* Internal definitions for memdesc->priv */
#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
/* Set if the memdesc is mapped into all pagetables */
#define KGSL_MEMDESC_GLOBAL BIT(1)
/* The memdesc is frozen during a snapshot */
#define KGSL_MEMDESC_FROZEN BIT(2)
/* The memdesc is mapped into a pagetable */
#define KGSL_MEMDESC_MAPPED BIT(3)
/* The memdesc is secured for content protection */
#define KGSL_MEMDESC_SECURE BIT(4)
/* Memory is accessible in privileged mode */
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* The memdesc is TZ locked for content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(7)
/* The memdesc is allocated through contiguous memory */
#define KGSL_MEMDESC_CONTIG BIT(8)

/**
 * struct kgsl_memdesc - GPU memory object descriptor
 * @pagetable: Pointer to the pagetable that the object is mapped in
 * @hostptr: Kernel virtual address
 * @hostptr_count: Number of threads using hostptr
 * @useraddr: User virtual address (if applicable)
 * @gpuaddr: GPU virtual address
 * @physaddr: Physical address of the memory object
 * @size: Size of the memory object
 * @mapsize: Size of memory mapped in userspace
 * @priv: Internal flags and settings
 * @sgt: Scatter gather table for allocated pages
 * @ops: Function hooks for the memdesc memory type
 * @flags: Flags set from userspace
 * @dev: Pointer to the struct device that owns this memory
 * @attrs: dma attributes for this memory
 * @pages: An array of pointers to allocated pages
 * @page_count: Total number of pages allocated
 * @cur_bindings: Number of sparse pages actively bound
 */
struct kgsl_memdesc {
	struct kgsl_pagetable *pagetable;
	void *hostptr;
	unsigned int hostptr_count;
	unsigned long useraddr;
	uint64_t gpuaddr;
	phys_addr_t physaddr;
	uint64_t size;
	uint64_t mapsize;
	unsigned int priv;
	struct sg_table *sgt;
	struct kgsl_memdesc_ops *ops;
	uint64_t flags;
	struct device *dev;
	unsigned long attrs;
	struct page **pages;
	unsigned int page_count;
	unsigned int cur_bindings;
};

/*
 * List of different memory entry types. The usermem enum
 * starts at 0, which we use for allocated memory, so 1 is
 * added to the enum values.
 */
#define KGSL_MEM_ENTRY_KERNEL 0
#define KGSL_MEM_ENTRY_USER (KGSL_USER_MEM_TYPE_ADDR + 1)
#define KGSL_MEM_ENTRY_ION (KGSL_USER_MEM_TYPE_ION + 1)
#define KGSL_MEM_ENTRY_MAX (KGSL_USER_MEM_TYPE_MAX + 1)

/* symbolic table for trace and debugfs */
#define KGSL_MEM_TYPES \
	{ KGSL_MEM_ENTRY_KERNEL, "gpumem" }, \
	{ KGSL_MEM_ENTRY_USER, "usermem" }, \
	{ KGSL_MEM_ENTRY_ION, "ion" }

/*
 * struct kgsl_mem_entry - a userspace memory allocation
 * @refcount: reference count. Currently userspace can only
 * hold a single reference count, but the kernel may hold more.
 * @memdesc: description of the memory
 * @priv_data: type-specific data, such as the dma-buf attachment pointer.
 * @node: rb_node for the gpu address lookup rb tree
 * @id: idr index for this entry, can be used to find memory that does not have
 * a valid GPU address.
 * @priv: back pointer to the process that owns this memory
 * @pending_free: if !0, userspace requested that this memory be freed, but
 * there are still references to it.
 * @metadata: String containing user specified metadata for the entry
 * @work: Work struct used to schedule a kgsl_mem_entry_put in atomic contexts
 * @bind_lock: Lock for sparse memory bindings
 * @bind_tree: RB Tree for sparse memory bindings
 */
struct kgsl_mem_entry {
	struct kref refcount;
	struct kgsl_memdesc memdesc;
	void *priv_data;
	struct rb_node node;
	unsigned int id;
	struct kgsl_process_private *priv;
	int pending_free;
	char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
	struct work_struct work;
	spinlock_t bind_lock;
	struct rb_root bind_tree;
};

struct kgsl_device_private;
struct kgsl_event_group;

typedef void (*kgsl_event_func)(struct kgsl_device *, struct kgsl_event_group *,
		void *, int);

/**
 * struct kgsl_event - KGSL GPU timestamp event
 * @device: Pointer to the KGSL device that owns the event
 * @context: Pointer to the context that owns the event
 * @timestamp: Timestamp for the event to expire
 * @func: Callback function for the event when it expires
 * @priv: Private data passed to the callback function
 * @node: List node for the kgsl_event_group list
 * @created: Jiffies when the event was created
 * @work: Work struct for dispatching the callback
 * @result: KGSL event result type to pass to the callback
 * @group: The event group this event belongs to
 */
struct kgsl_event {
	struct kgsl_device *device;
	struct kgsl_context *context;
	unsigned int timestamp;
	kgsl_event_func func;
	void *priv;
	struct list_head node;
	unsigned int created;
	struct work_struct work;
	int result;
	struct kgsl_event_group *group;
};

typedef int (*readtimestamp_func)(struct kgsl_device *, void *,
	enum kgsl_timestamp_type, unsigned int *);

/**
 * struct kgsl_event_group - A list of GPU events
 * @context: Pointer to the active context for the events
 * @lock: Spinlock for protecting the list
 * @events: List of active GPU events
 * @group: Node for the master group list
 * @processed: Last processed timestamp
 * @name: String name for the group (for the debugfs file)
 * @readtimestamp: Function pointer to read a timestamp
 * @priv: Priv member to pass to the readtimestamp function
 */
struct kgsl_event_group {
	struct kgsl_context *context;
	spinlock_t lock;
	struct list_head events;
	struct list_head group;
	unsigned int processed;
	char name[64];
	readtimestamp_func readtimestamp;
	void *priv;
};

/**
 * struct kgsl_protected_registers - Protected register range
 * @base: Offset of the range to be protected
 * @range: Range (# of registers = 2 ** range)
 */
struct kgsl_protected_registers {
	unsigned int base;
	int range;
};

/**
 * struct sparse_bind_object - Bind metadata
 * @node: Node for the rb tree
 * @p_memdesc: Physical memdesc bound to
 * @v_off: Offset of bind in the virtual entry
 * @p_off: Offset of bind in the physical memdesc
 * @size: Size of the bind
 * @flags: Flags for the bind
 */
struct sparse_bind_object {
	struct rb_node node;
	struct kgsl_memdesc *p_memdesc;
	uint64_t v_off;
	uint64_t p_off;
	uint64_t size;
	uint64_t flags;
};

long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
				*dev_priv, unsigned int cmd, void *data);
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data);
long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
						struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data);
long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);

long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);

void kgsl_mem_entry_destroy(struct kref *kref);

void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
		int *egl_surface_count, int *egl_image_count);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id);

extern const struct dev_pm_ops kgsl_pm_ops;

int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);

static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
				uint64_t gpuaddr, uint64_t size)
{
	/* set a minimum size to search for */
	if (!size)
		size = 1;

	/* don't overflow */
	if (size > U64_MAX - gpuaddr)
		return 0;

	if (gpuaddr >= memdesc->gpuaddr &&
	    ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size)))
		return 1;

	return 0;
}

static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
{
	if (memdesc->ops && memdesc->ops->map_kernel)
		memdesc->ops->map_kernel(memdesc);

	return memdesc->hostptr;
}

static inline void kgsl_memdesc_unmap(struct kgsl_memdesc *memdesc)
{
	if (memdesc->ops && memdesc->ops->unmap_kernel)
		memdesc->ops->unmap_kernel(memdesc);
}

static inline void *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
		uint64_t gpuaddr)
{
	void *hostptr = NULL;

	if ((gpuaddr >= memdesc->gpuaddr) &&
	    (gpuaddr < (memdesc->gpuaddr + memdesc->size)))
		hostptr = kgsl_memdesc_map(memdesc);

	return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL;
}
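
/*
 * Note (editorial): kgsl_gpuaddr_to_vaddr() maps the buffer through
 * kgsl_memdesc_map(), so a successful lookup is expected to be
 * balanced with kgsl_memdesc_unmap() once CPU access is complete.
 */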

static inline int timestamp_cmp(unsigned int a, unsigned int b)
{
	/* check for equal */
	if (a == b)
		return 0;

	/* check for greater-than for non-rollover case */
	if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW))
		return 1;

	/*
	 * check for greater-than for rollover case; note that <= is
	 * required to ensure that consistent results are returned for
	 * values whose difference is equal to the window size
	 */
	a += KGSL_TIMESTAMP_WINDOW;
	b += KGSL_TIMESTAMP_WINDOW;
	return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
}
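
/*
 * Worked example (editorial): timestamp_cmp(5, 0xFFFFFFFB) returns 1.
 * 5 is not greater than 0xFFFFFFFB directly, but after adding the
 * window, a becomes 0x80000005 and b wraps to 0x7FFFFFFB, so 5 is
 * treated as the newer, post-rollover timestamp.
 */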

/**
 * kgsl_schedule_work() - Schedule a work item on the KGSL workqueue
 * @work: work item to schedule
 */
static inline void kgsl_schedule_work(struct work_struct *work)
{
	queue_work(kgsl_driver.workqueue, work);
}

static inline int
kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
{
	if (entry)
		return kref_get_unless_zero(&entry->refcount);
	return 0;
}

static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
	if (entry)
		kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
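
/*
 * Typical usage (illustrative, editorial): lookup helpers such as
 * kgsl_sharedmem_find() return an entry with a reference held, which
 * the caller drops when it is done:
 *
 *	entry = kgsl_sharedmem_find(private, gpuaddr);
 *	if (entry != NULL) {
 *		... use entry->memdesc ...
 *		kgsl_mem_entry_put(entry);
 *	}
 */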

/*
 * kgsl_addr_range_overlap() - Checks if 2 ranges overlap
 * @gpuaddr1: Start of first address range
 * @size1: Size of first address range
 * @gpuaddr2: Start of second address range
 * @size2: Size of second address range
 *
 * Returns true if the two given address ranges overlap, else false.
 */
static inline bool kgsl_addr_range_overlap(uint64_t gpuaddr1,
		uint64_t size1, uint64_t gpuaddr2, uint64_t size2)
{
	if ((size1 > (U64_MAX - gpuaddr1)) || (size2 > (U64_MAX - gpuaddr2)))
		return false;
	return !(((gpuaddr1 + size1) <= gpuaddr2) ||
		(gpuaddr1 >= (gpuaddr2 + size2)));
}
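
/*
 * Example (editorial): the ranges [0x1000, 0x2000) and [0x1800, 0x2800)
 * overlap, so kgsl_addr_range_overlap(0x1000, 0x1000, 0x1800, 0x1000)
 * returns true; adjacent ranges such as [0x1000, 0x2000) and
 * [0x2000, 0x3000) do not.
 */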

/**
 * kgsl_malloc() - Use either kzalloc or vmalloc to allocate memory
 * @size: Size of the desired allocation
 *
 * Allocate a block of memory for the driver - if it is small, try
 * kzalloc (fast); otherwise fall back to vmalloc (safe for large
 * allocations).
 */
static inline void *kgsl_malloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);

	return vmalloc(size);
}

/**
 * kgsl_free() - Free memory allocated by kgsl_malloc()
 * @ptr: Pointer to the memory to free
 *
 * Free the memory whether it lives in vmalloc or kmalloc space.
 */
static inline void kgsl_free(void *ptr)
{
	if (ptr != NULL && is_vmalloc_addr(ptr))
		return vfree(ptr);

	kfree(ptr);
}
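
/*
 * Illustrative pairing (editorial): memory from kgsl_malloc() must be
 * released with kgsl_free(), which picks vfree() or kfree() based on
 * the address:
 *
 *	u32 *table = kgsl_malloc(count * sizeof(*table));
 *
 *	if (table == NULL)
 *		return -ENOMEM;
 *	...
 *	kgsl_free(table);
 */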

static inline int _copy_from_user(void *dest, void __user *src,
		unsigned int ksize, unsigned int usize)
{
	unsigned int copy = ksize < usize ? ksize : usize;

	if (copy == 0)
		return -EINVAL;

	return copy_from_user(dest, src, copy) ? -EFAULT : 0;
}
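
/*
 * Illustrative use (editorial): copying min(ksize, usize) bytes lets a
 * smaller (older) userspace struct be read into a larger kernel struct
 * without overrunning either side; 'param' and 'arg' here are
 * hypothetical:
 *
 *	struct kgsl_foo param = { 0 };
 *
 *	ret = _copy_from_user(&param, to_user_ptr(arg),
 *		sizeof(param), usize);
 */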

static inline void __user *to_user_ptr(uint64_t address)
{
	return (void __user *)(uintptr_t)address;
}

static inline void kgsl_gpu_sysfs_add_link(struct kobject *dst,
		struct kobject *src, const char *src_name,
		const char *dst_name)
{
	struct kernfs_node *old;

	if (dst == NULL || src == NULL)
		return;

	old = sysfs_get_dirent(src->sd, src_name);
	if (IS_ERR_OR_NULL(old))
		return;

	kernfs_create_link(dst->sd, dst_name, old);
}
#endif /* __KGSL_H */