/* UAPI header for the Qualcomm KGSL (Adreno GPU) driver */
Shrenuj Bansala419c792016-10-20 14:05:11 -07001#ifndef _UAPI_MSM_KGSL_H
2#define _UAPI_MSM_KGSL_H
3
4#include <linux/types.h>
5#include <linux/ioctl.h>
6
/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */
13
/* Frozen driver version reported via KGSL_PROP_VERSION (see note above) */
#define KGSL_VERSION_MAJOR 3
#define KGSL_VERSION_MINOR 14
16
/*
 * We have traditionally mixed context and issueibcmds / command batch flags
 * together into a big flag stew. This worked fine until we started adding a
 * lot more command batch flags and we started running out of bits. Turns out
 * we have a bit of room in the context type / priority mask that we could use
 * for command batches, but that means we need to split out the flags into two
 * coherent sets.
 *
 * If any future definitions are for both context and cmdbatch add both defines
 * and link the cmdbatch to the context define as we do below. Otherwise feel
 * free to add exclusive bits to either set.
 */
29
/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM 0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC 0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST 0x00000004
#define KGSL_CONTEXT_CTX_SWITCH 0x00000008
#define KGSL_CONTEXT_PREAMBLE 0x00000010
#define KGSL_CONTEXT_TRASH_STATE 0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS 0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS 0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME 0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC 0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT 0x00000800
/* Context priority lives in bits [12:15]; 0 means undefined/default */
#define KGSL_CONTEXT_PRIORITY_MASK 0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT 12
#define KGSL_CONTEXT_PRIORITY_UNDEF 0

#define KGSL_CONTEXT_IFH_NOP 0x00010000
#define KGSL_CONTEXT_SECURE 0x00020000
#define KGSL_CONTEXT_NO_SNAPSHOT 0x00040000
#define KGSL_CONTEXT_SPARSE 0x00080000

/* Preemption style is encoded in bits [25:27] */
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK 0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT 25
#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT 0x0
#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN 0x2

/* The owning API's type is encoded in bits [20:24] */
#define KGSL_CONTEXT_TYPE_MASK 0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT 20
#define KGSL_CONTEXT_TYPE_ANY 0
#define KGSL_CONTEXT_TYPE_GL 1
#define KGSL_CONTEXT_TYPE_CL 2
#define KGSL_CONTEXT_TYPE_C2D 3
#define KGSL_CONTEXT_TYPE_RS 4
#define KGSL_CONTEXT_TYPE_VK 5
#define KGSL_CONTEXT_TYPE_UNKNOWN 0x1E

#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000

/* Sentinel value for an invalid context id */
#define KGSL_CONTEXT_INVALID 0xffffffff
74
/*
 * --- command batch flags ---
 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches. To be
 * safe the other 8 bits that are still available in the context field should be
 * omitted here in case we need to share - the other bits are available for
 * cmdbatch only flags as needed
 */
#define KGSL_CMDBATCH_MEMLIST 0x00000001
#define KGSL_CMDBATCH_MARKER 0x00000002
#define KGSL_CMDBATCH_SUBMIT_IB_LIST KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
#define KGSL_CMDBATCH_CTX_SWITCH KGSL_CONTEXT_CTX_SWITCH /* 0x008 */
#define KGSL_CMDBATCH_PROFILING 0x00000010
/*
 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
 * to take effect, as the latter only affects the time data returned.
 */
#define KGSL_CMDBATCH_PROFILING_KTIME 0x00000020
#define KGSL_CMDBATCH_END_OF_FRAME KGSL_CONTEXT_END_OF_FRAME /* 0x100 */
#define KGSL_CMDBATCH_SYNC KGSL_CONTEXT_SYNC /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
#define KGSL_CMDBATCH_SPARSE 0x1000 /* 0x1000 */

/*
 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
 * contexts and command batches. Update this comment as new flags are added.
 */
102
/*
 * gpu_command_object flags - these flags communicate the type of command or
 * memory object being submitted for a GPU command
 */

/* Flags for GPU command objects */
#define KGSL_CMDLIST_IB 0x00000001U
#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
#define KGSL_CMDLIST_IB_PREAMBLE 0x00000004U

/* Flags for GPU command memory objects */
#define KGSL_OBJLIST_MEMOBJ 0x00000008U
#define KGSL_OBJLIST_PROFILE 0x00000010U

/* Flags for GPU command sync points */
#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
120
/* --- Memory allocation flags --- */

/* General allocation hints (note: some flags exceed 32 bits, hence ULL) */
#define KGSL_MEMFLAGS_SECURE 0x00000008ULL
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL

/* Flag for binding all the virt range to single phys data */
#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
#define KGSL_SPARSE_BIND 0x1ULL
#define KGSL_SPARSE_UNBIND 0x2ULL

/* Memory caching hints: the cache mode is encoded in bits [26:27] */
#define KGSL_CACHEMODE_MASK 0x0C000000U
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
#define KGSL_MEMFLAGS_IOCOHERENT 0x80000000ULL
/* Memory types for which allocations are made: usage hint in bits [8:15] */
#define KGSL_MEMTYPE_MASK 0x0000FF00
#define KGSL_MEMTYPE_SHIFT 8

#define KGSL_MEMTYPE_OBJECTANY 0
#define KGSL_MEMTYPE_FRAMEBUFFER 1
#define KGSL_MEMTYPE_RENDERBUFFER 2
#define KGSL_MEMTYPE_ARRAYBUFFER 3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER 4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER 5
#define KGSL_MEMTYPE_TEXTURE 6
#define KGSL_MEMTYPE_SURFACE 7
#define KGSL_MEMTYPE_EGL_SURFACE 8
#define KGSL_MEMTYPE_GL 9
#define KGSL_MEMTYPE_CL 10
#define KGSL_MEMTYPE_CL_BUFFER_MAP 11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP 12
#define KGSL_MEMTYPE_CL_IMAGE_MAP 13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP 14
#define KGSL_MEMTYPE_CL_KERNEL_STACK 15
#define KGSL_MEMTYPE_COMMAND 16
#define KGSL_MEMTYPE_2D 17
#define KGSL_MEMTYPE_EGL_IMAGE 18
#define KGSL_MEMTYPE_EGL_SHADOW 19
#define KGSL_MEMTYPE_MULTISAMPLE 20
#define KGSL_MEMTYPE_KERNEL 255

/*
 * Alignment hint, passed as the power of 2 exponent in bits [16:23].
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK 0x00FF0000
#define KGSL_MEMALIGN_SHIFT 16
181
/* Origin of user memory handed to the driver (see struct kgsl_map_user_mem) */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM = 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR = 0x00000002,
	KGSL_USER_MEM_TYPE_ION = 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF = 0x00000003,
	KGSL_USER_MEM_TYPE_MAX = 0x00000007,
};
/* User memory type occupies bits [5:7] of the memory flags */
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
	KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
211
/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE 0x00000000
#define KGSL_FLAGS_SAFEMODE 0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED 0x00000008
#define KGSL_FLAGS_ACTIVE 0x00000010
#define KGSL_FLAGS_RESERVED0 0x00000020
#define KGSL_FLAGS_RESERVED1 0x00000040
#define KGSL_FLAGS_RESERVED2 0x00000080
#define KGSL_FLAGS_SOFT_RESET 0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
228
Shrenuj Bansala9ae9de2016-11-15 16:01:00 -0800229/* UBWC Modes */
230#define KGSL_UBWC_NONE 0
231#define KGSL_UBWC_1_0 1
232#define KGSL_UBWC_2_0 2
233#define KGSL_UBWC_3_0 3
234
Shrenuj Bansala419c792016-10-20 14:05:11 -0700235/*
236 * Reset status values for context
237 */
238enum kgsl_ctx_reset_stat {
239 KGSL_CTX_STAT_NO_ERROR = 0x00000000,
240 KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT = 0x00000001,
241 KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT = 0x00000002,
242 KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT = 0x00000003
243};
244
/*
 * Convert a value expressed in MB/s to bytes per second.
 * The argument is parenthesized so that expression arguments
 * (e.g. KGSL_CONVERT_TO_MBPS(a + b)) expand with the intended
 * precedence; the unparenthesized original would multiply only
 * the last operand.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
247
/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0 = 0x00000000,
	KGSL_DEVICE_MAX		/* number of valid device ids */
};
253
/* Device information blob returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	size_t gmem_sizebytes;
};
271
/*
 * struct kgsl_devmemstore - this structure defines the region of memory
 * that can be mmap()ed from this driver. The timestamp fields are volatile
 * because they are written by the GPU
 * @soptimestamp: Start of pipeline timestamp written by GPU before the
 * commands in concern are processed
 * @sbz: Unused, kept for 8 byte alignment
 * @eoptimestamp: End of pipeline timestamp written by GPU after the
 * commands in concern are processed
 * @sbz2: Unused, kept for 8 byte alignment
 * @preempted: Indicates if the context was preempted
 * @sbz3: Unused, kept for 8 byte alignment
 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
 * @sbz4: Unused, kept for 8 byte alignment
 * @current_context: The current context the GPU is working on
 * @sbz5: Unused, kept for 8 byte alignment
 */
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int preempted;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* Byte offset of @field within the memstore slot belonging to @ctxt_id */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))
305
/* timestamp id*/
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED = 0x00000002, /* end-of-pipeline timestamp*/
	KGSL_TIMESTAMP_QUEUED = 0x00000003, /* timestamp assigned at submit */
};
312
/*
 * property types - used with kgsl_device_getproperty.
 * Note the numbering has gaps; values are part of the ABI and must
 * never be reused or renumbered.
 */
#define KGSL_PROP_DEVICE_INFO 0x1
#define KGSL_PROP_DEVICE_SHADOW 0x2
#define KGSL_PROP_DEVICE_POWER 0x3
#define KGSL_PROP_SHMEM 0x4
#define KGSL_PROP_SHMEM_APERTURES 0x5
#define KGSL_PROP_MMU_ENABLE 0x6
#define KGSL_PROP_INTERRUPT_WAITS 0x7
#define KGSL_PROP_VERSION 0x8
#define KGSL_PROP_GPU_RESET_STAT 0x9
#define KGSL_PROP_PWRCTRL 0xE
#define KGSL_PROP_PWR_CONSTRAINT 0x12
#define KGSL_PROP_UCHE_GMEM_VADDR 0x13
#define KGSL_PROP_SP_GENERIC_MEM 0x14
#define KGSL_PROP_UCODE_VERSION 0x15
#define KGSL_PROP_GPMU_VERSION 0x16
#define KGSL_PROP_HIGHEST_BANK_BIT 0x17
#define KGSL_PROP_DEVICE_BITNESS 0x18
#define KGSL_PROP_DEVICE_QDSS_STM 0x19
#define KGSL_PROP_MIN_ACCESS_LENGTH 0x1A
#define KGSL_PROP_UBWC_MODE 0x1B
#define KGSL_PROP_DEVICE_QTIMER 0x20
#define KGSL_PROP_L3_PWR_CONSTRAINT 0x22
#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
Shrenuj Bansala419c792016-10-20 14:05:11 -0700338
/* Returned for KGSL_PROP_DEVICE_SHADOW: mmap() offset/size of the memstore */
struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* Returned for KGSL_PROP_DEVICE_QDSS_STM */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_DEVICE_QTIMER */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_VERSION: driver and device version numbers */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Returned for KGSL_PROP_SP_GENERIC_MEM */
struct kgsl_sp_generic_mem {
	uint64_t local;
	uint64_t pvt;
};

/* Returned for KGSL_PROP_UCODE_VERSION: microcode (PFP/PM4) versions */
struct kgsl_ucode_version {
	unsigned int pfp;
	unsigned int pm4;
};

/* Returned for KGSL_PROP_GPMU_VERSION */
struct kgsl_gpmu_version {
	unsigned int major;
	unsigned int minor;
	unsigned int features;
};
377
/* Performance counter groups (hardware blocks) for the perfcounter ioctls */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
#define KGSL_PERFCOUNTER_GROUP_MAX 0x24

/* Sentinel counter offsets returned when a counter is unavailable */
#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
420
/* structure holds list of ibs (indirect buffers submitted for execution) */
struct kgsl_ibdesc {
	unsigned long gpuaddr;	/* GPU address of the IB */
	unsigned long __pad;	/* unused, kept for binary compatibility */
	size_t sizedwords;	/* IB size in dwords */
	unsigned int ctrl;
};
428
/**
 * struct kgsl_cmdbatch_profiling_buffer
 * @wall_clock_s: Ringbuffer submission time (seconds).
 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
 *                in kernel clocks, otherwise wall clock time is used.
 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
 *                If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
 *                in kernel clocks, otherwise wall clock time is used.
 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
 *
 * This structure defines the profiling buffer used to measure cmdbatch
 * execution time
 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;
	uint64_t wall_clock_ns;
	uint64_t gpu_ticks_queued;
	uint64_t gpu_ticks_submitted;
	uint64_t gpu_ticks_retired;
};
451
/* ioctls */
#define KGSL_IOC_TYPE 0x09

/*
 * get misc info about the GPU
 * type should be a value from enum kgsl_property_type
 * value points to a structure that varies based on type
 * sizebytes is sizeof() that structure
 * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
 * this structure contains hardware versioning info.
 * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
 * this is used to find mmap() offset and sizes for mapping
 * struct kgsl_memstore into userspace.
 */
struct kgsl_device_getproperty {
	unsigned int type;
	void __user *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
474
/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* Per-context variant of the wait above: timestamps are scoped to context_id */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;	/* milliseconds */
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
497
/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr;	/* user pointer to kgsl_ibdesc array */
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
520
/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

/* Legacy read-only encoding of the same ioctl, kept for compatibility */
#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
534
/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/*
 * Previous versions of this header had incorrectly defined
 * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
 * of a write only ioctl. To ensure binary compatibility, the following
 * #define will be used to intercept the incorrect ioctl
 */

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
558
/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_* values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
577
/*
 * add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space
 */
struct kgsl_map_user_mem {
	int fd;				/* fd of the buffer, when applicable */
	unsigned long gpuaddr; /*output param */
	size_t len;
	size_t offset;
	unsigned long hostptr; /*input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

/* Per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

/* Per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)
614
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr; /*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
633
/* User-generated event injected into the CFF (common file format) capture */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];	/* reserved for binary compatibility */
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

/* Describes a rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* Describes a CPU-visible buffer used as a GMEM shadow target */
struct kgsl_buffer_desc {
	void *hostptr;
	unsigned long gpuaddr;
	int size;
	unsigned int format;
	unsigned int pitch;
	unsigned int enabled;
};

/* Bind a shadow buffer to a region of GMEM for a draw context */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
675
/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr; /*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation. It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

/* Set the bin base offset for a draw context */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
710
/* Target selector for the command window write below */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN = 0x00000000,
	KGSL_CMDWINDOW_2D = 0x00000000,
	KGSL_CMDWINDOW_3D = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
729
/* Allocate GPU-accessible memory; see also IOCTL_KGSL_GPUMEM_ALLOC_ID */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;	/* mask of KGSL_MEMFLAGS_* / KGSL_MEMTYPE_* */
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* Sync a memory range into a CFF capture */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
747
/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type; /* Type of event (see list below) */
	unsigned int timestamp; /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void __user *priv; /* Pointer to the event specific blob */
	size_t len; /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel. Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
791
/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned long __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
823
/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
840
/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long useraddr;
/* private: reserved for future use*/
	unsigned long __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO\
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
869
/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 * @offset: offset into the buffer
 * @length: number of bytes starting from offset to perform
 * the cache operation on
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
 * directions
 *
 */
struct kgsl_gpumem_sync_cache {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int op;
	size_t offset;
	size_t length;
};
891
/* Cache operation bits for kgsl_gpumem_sync_cache::op */

#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

/* Clean and invalidate in a single operation */
#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

/*
 * Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct.
 * Use an unsigned literal: left-shifting a signed int into the sign bit
 * (the previous "1 << 31U") is undefined behavior in C; (1U << 31) is
 * well defined and produces the identical bit pattern.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
906
907/**
908 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
909 * @groupid: Performance counter group ID
910 * @countable: Countable to select within the group
911 * @offset: Return offset of the reserved LO counter
912 * @offset_hi: Return offset of the reserved HI counter
913 *
914 * Get an available performance counter from a specified groupid. The offset
915 * of the performance counter will be returned after successfully assigning
916 * the countable to the counter for the specified group. An error will be
917 * returned and an offset of 0 if the groupid is invalid or there are no
918 * more counters left. After successfully getting a perfcounter, the user
919 * must call kgsl_perfcounter_put(groupid, contable) when finished with
920 * the perfcounter to clear up perfcounter resources.
921 *
922 */
923struct kgsl_perfcounter_get {
924 unsigned int groupid;
925 unsigned int countable;
926 unsigned int offset;
927 unsigned int offset_hi;
928/* private: reserved for future use */
929 unsigned int __pad; /* For future binary compatibility */
930};
931
932#define IOCTL_KGSL_PERFCOUNTER_GET \
933 _IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
934
935/**
936 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
937 * @groupid: Performance counter group ID
938 * @countable: Countable to release within the group
939 *
940 * Put an allocated performance counter to allow others to have access to the
941 * resource that was previously taken. This is only to be called after
942 * successfully getting a performance counter from kgsl_perfcounter_get().
943 *
944 */
945struct kgsl_perfcounter_put {
946 unsigned int groupid;
947 unsigned int countable;
948/* private: reserved for future use */
949 unsigned int __pad[2]; /* For future binary compatibility */
950};
951
952#define IOCTL_KGSL_PERFCOUNTER_PUT \
953 _IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
954
955/**
956 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
957 * @groupid: Performance counter group ID
958 * @countable: Return active countables array
959 * @size: Size of active countables array
960 * @max_counters: Return total number counters for the group ID
961 *
962 * Query the available performance counters given a groupid. The array
963 * *countables is used to return the current active countables in counters.
964 * The size of the array is passed in so the kernel will only write at most
965 * size or counter->size for the group id. The total number of available
966 * counters for the group ID is returned in max_counters.
967 * If the array or size passed in are invalid, then only the maximum number
968 * of counters will be returned, no data will be written to *countables.
969 * If the groupid is invalid an error code will be returned.
970 *
971 */
972struct kgsl_perfcounter_query {
973 unsigned int groupid;
974 /* Array to return the current countable for up to size counters */
975 unsigned int __user *countables;
976 unsigned int count;
977 unsigned int max_counters;
978/* private: reserved for future use */
979 unsigned int __pad[2]; /* For future binary compatibility */
980};
981
982#define IOCTL_KGSL_PERFCOUNTER_QUERY \
983 _IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
984
985/**
986 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
987 * @groupid: Performance counter group IDs
988 * @countable: Performance counter countable IDs
989 * @value: Return performance counter reads
990 * @size: Size of all arrays (groupid/countable pair and return value)
991 *
992 * Read in the current value of a performance counter given by the groupid
993 * and countable.
994 *
995 */
996
997struct kgsl_perfcounter_read_group {
998 unsigned int groupid;
999 unsigned int countable;
1000 unsigned long long value;
1001};
1002
1003struct kgsl_perfcounter_read {
1004 struct kgsl_perfcounter_read_group __user *reads;
1005 unsigned int count;
1006/* private: reserved for future use */
1007 unsigned int __pad[2]; /* For future binary compatibility */
1008};
1009
1010#define IOCTL_KGSL_PERFCOUNTER_READ \
1011 _IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
1012/*
1013 * struct kgsl_gpumem_sync_cache_bulk - argument to
1014 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
1015 * @id_list: list of GPU buffer ids of the buffers to sync
1016 * @count: number of GPU buffer ids in id_list
1017 * @op: a mask of KGSL_GPUMEM_CACHE_* values
1018 *
1019 * Sync the cache for memory headed to and from the GPU. Certain
1020 * optimizations can be made on the cache operation based on the total
1021 * size of the working set of memory to be managed.
1022 */
1023struct kgsl_gpumem_sync_cache_bulk {
1024 unsigned int __user *id_list;
1025 unsigned int count;
1026 unsigned int op;
1027/* private: reserved for future use */
1028 unsigned int __pad[2]; /* For future binary compatibility */
1029};
1030
1031#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
1032 _IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
1033
1034/*
1035 * struct kgsl_cmd_syncpoint_timestamp
1036 * @context_id: ID of a KGSL context
1037 * @timestamp: GPU timestamp
1038 *
1039 * This structure defines a syncpoint comprising a context/timestamp pair. A
1040 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
1041 * dependencies that must be met before the command can be submitted to the
1042 * hardware
1043 */
1044struct kgsl_cmd_syncpoint_timestamp {
1045 unsigned int context_id;
1046 unsigned int timestamp;
1047};
1048
1049struct kgsl_cmd_syncpoint_fence {
1050 int fd;
1051};
1052
1053/**
1054 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
1055 * @type: type of sync point defined here
1056 * @priv: Pointer to the type specific buffer
1057 * @size: Size of the type specific buffer
1058 *
1059 * This structure contains pointers defining a specific command sync point.
1060 * The pointer and size should point to a type appropriate structure.
1061 */
1062struct kgsl_cmd_syncpoint {
1063 int type;
1064 void __user *priv;
1065 size_t size;
1066};
1067
/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: Submission flags (NOTE(review): presumably a mask of the context
 * and KGSL_IBDESC_* flag bits - confirm against the driver implementation)
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry a user defined timestamp, on exit the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware. This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way
 * to submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied. This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint __user *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1107
1108/**
1109 * struct kgsl_device_constraint - device constraint argument
1110 * @context_id: KGSL context ID
1111 * @type: type of constraint i.e pwrlevel/none
1112 * @data: constraint data
1113 * @size: size of the constraint data
1114 */
1115struct kgsl_device_constraint {
1116 unsigned int type;
1117 unsigned int context_id;
1118 void __user *data;
1119 size_t size;
1120};
1121
1122/* Constraint Type*/
1123#define KGSL_CONSTRAINT_NONE 0
1124#define KGSL_CONSTRAINT_PWRLEVEL 1
1125
Urvashi Agrawal53245652018-03-08 14:42:15 -08001126/* L3 constraint Type */
1127#define KGSL_CONSTRAINT_L3_NONE 2
1128#define KGSL_CONSTRAINT_L3_PWRLEVEL 3
1129
Shrenuj Bansala419c792016-10-20 14:05:11 -07001130/* PWRLEVEL constraint level*/
1131/* set to min frequency */
Urvashi Agrawal53245652018-03-08 14:42:15 -08001132#define KGSL_CONSTRAINT_PWR_MIN 0
Shrenuj Bansala419c792016-10-20 14:05:11 -07001133/* set to max frequency */
Urvashi Agrawal53245652018-03-08 14:42:15 -08001134#define KGSL_CONSTRAINT_PWR_MAX 1
1135
1136/* L3 PWRLEVEL constraint level */
1137#define KGSL_CONSTRAINT_L3_PWR_MED 0
1138#define KGSL_CONSTRAINT_L3_PWR_MAX 1
Shrenuj Bansala419c792016-10-20 14:05:11 -07001139
1140struct kgsl_device_constraint_pwrlevel {
1141 unsigned int level;
1142};
1143
1144/**
1145 * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1146 * @id: returned id for the syncsource that was created.
1147 *
1148 * This ioctl creates a userspace sync timeline.
1149 */
1150
1151struct kgsl_syncsource_create {
1152 unsigned int id;
1153/* private: reserved for future use */
1154 unsigned int __pad[3];
1155};
1156
1157#define IOCTL_KGSL_SYNCSOURCE_CREATE \
1158 _IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1159
1160/**
1161 * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1162 * @id: syncsource id to destroy
1163 *
1164 * This ioctl creates a userspace sync timeline.
1165 */
1166
1167struct kgsl_syncsource_destroy {
1168 unsigned int id;
1169/* private: reserved for future use */
1170 unsigned int __pad[3];
1171};
1172
1173#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
1174 _IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1175
1176/**
1177 * struct kgsl_syncsource_create_fence - Argument to
1178 * IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1179 * @id: syncsource id
1180 * @fence_fd: returned sync_fence fd
1181 *
1182 * Create a fence that may be signaled by userspace by calling
1183 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1184 * these fences.
1185 */
1186struct kgsl_syncsource_create_fence {
1187 unsigned int id;
1188 int fence_fd;
1189/* private: reserved for future use */
1190 unsigned int __pad[4];
1191};
1192
1193/**
1194 * struct kgsl_syncsource_signal_fence - Argument to
1195 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1196 * @id: syncsource id
1197 * @fence_fd: sync_fence fd to signal
1198 *
1199 * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1200 * call using the same syncsource id. This allows a fence to be shared
1201 * to other processes but only signaled by the process owning the fd
1202 * used to create the fence.
1203 */
1204#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
1205 _IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
1206
1207struct kgsl_syncsource_signal_fence {
1208 unsigned int id;
1209 int fence_fd;
1210/* private: reserved for future use */
1211 unsigned int __pad[4];
1212};
1213
1214#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
1215 _IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1216
1217/**
1218 * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
1219 * @offset: Offset into the GPU object to sync
1220 * @length: Number of bytes to sync
1221 * @id: ID of the GPU object to sync
1222 */
1223struct kgsl_cff_sync_gpuobj {
1224 uint64_t offset;
1225 uint64_t length;
1226 unsigned int id;
1227};
1228
1229#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
1230 _IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1231
1232/**
1233 * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
1234 * @size: Size in bytes of the object to allocate
1235 * @flags: mask of KGSL_MEMFLAG_* bits
1236 * @va_len: Size in bytes of the virtual region to allocate
1237 * @mmapsize: Returns the mmap() size of the object
1238 * @id: Returns the GPU object ID of the new object
1239 * @metadata_len: Length of the metdata to copy from the user
1240 * @metadata: Pointer to the user specified metadata to store for the object
1241 */
1242struct kgsl_gpuobj_alloc {
1243 uint64_t size;
1244 uint64_t flags;
1245 uint64_t va_len;
1246 uint64_t mmapsize;
1247 unsigned int id;
1248 unsigned int metadata_len;
1249 uint64_t metadata;
1250};
1251
1252/* Let the user know that this header supports the gpuobj metadata */
1253#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
1254
1255#define IOCTL_KGSL_GPUOBJ_ALLOC \
1256 _IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1257
1258/**
1259 * struct kgsl_gpuobj_free - Argument to IOCTL_KGLS_GPUOBJ_FREE
1260 * @flags: Mask of: KGSL_GUPOBJ_FREE_ON_EVENT
1261 * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
1262 * specified
1263 * @id: ID of the GPU object to free
1264 * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
1265 * event to free on
1266 * @len: Length of the data passed in priv
1267 */
1268struct kgsl_gpuobj_free {
1269 uint64_t flags;
1270 uint64_t __user priv;
1271 unsigned int id;
1272 unsigned int type;
1273 unsigned int len;
1274};
1275
1276#define KGSL_GPUOBJ_FREE_ON_EVENT 1
1277
1278#define KGSL_GPU_EVENT_TIMESTAMP 1
1279#define KGSL_GPU_EVENT_FENCE 2
1280
1281/**
1282 * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
1283 * object on
1284 * @context_id: ID of the timestamp event to wait for
1285 * @timestamp: Timestamp of the timestamp event to wait for
1286 */
1287struct kgsl_gpu_event_timestamp {
1288 unsigned int context_id;
1289 unsigned int timestamp;
1290};
1291
1292/**
1293 * struct kgsl_gpu_event_fence - Specifies a fence ID to to free a GPU object on
1294 * @fd: File descriptor for the fence
1295 */
1296struct kgsl_gpu_event_fence {
1297 int fd;
1298};
1299
1300#define IOCTL_KGSL_GPUOBJ_FREE \
1301 _IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1302
1303/**
1304 * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
1305 * @gpuaddr: GPU address of the object
1306 * @flags: Current flags for the object
1307 * @size: Size of the object
1308 * @va_len: VA size of the object
1309 * @va_addr: Virtual address of the object (if it is mapped)
1310 * id - GPU object ID of the object to query
1311 */
1312struct kgsl_gpuobj_info {
1313 uint64_t gpuaddr;
1314 uint64_t flags;
1315 uint64_t size;
1316 uint64_t va_len;
1317 uint64_t va_addr;
1318 unsigned int id;
1319};
1320
1321#define IOCTL_KGSL_GPUOBJ_INFO \
1322 _IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1323
1324/**
1325 * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
1326 * @priv: Pointer to the private data for the import type
1327 * @priv_len: Length of the private data
1328 * @flags: Mask of KGSL_MEMFLAG_ flags
1329 * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
1330 * @id: Returns the ID of the new GPU object
1331 */
1332struct kgsl_gpuobj_import {
1333 uint64_t __user priv;
1334 uint64_t priv_len;
1335 uint64_t flags;
1336 unsigned int type;
1337 unsigned int id;
1338};
1339
1340/**
1341 * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
1342 * @fd: File descriptor for the dma-buf object
1343 */
1344struct kgsl_gpuobj_import_dma_buf {
1345 int fd;
1346};
1347
1348/**
1349 * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
1350 * @virtaddr: Virtual address of the object to import
1351 */
1352struct kgsl_gpuobj_import_useraddr {
1353 uint64_t virtaddr;
1354};
1355
1356#define IOCTL_KGSL_GPUOBJ_IMPORT \
1357 _IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1358
1359/**
1360 * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
1361 * @offset: Offset within the GPU object to sync
1362 * @length: Number of bytes to sync
1363 * @id: ID of the GPU object to sync
1364 * @op: Cache operation to execute
1365 */
1366
1367struct kgsl_gpuobj_sync_obj {
1368 uint64_t offset;
1369 uint64_t length;
1370 unsigned int id;
1371 unsigned int op;
1372};
1373
1374/**
1375 * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
1376 * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
1377 * @obj_len: Size of each item in the array
1378 * @count: Number of items in the array
1379 */
1380
1381struct kgsl_gpuobj_sync {
1382 uint64_t __user objs;
1383 unsigned int obj_len;
1384 unsigned int count;
1385};
1386
1387#define IOCTL_KGSL_GPUOBJ_SYNC \
1388 _IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1389
1390/**
1391 * struct kgsl_command_object - GPU command object
1392 * @offset: GPU address offset of the object
1393 * @gpuaddr: GPU address of the object
1394 * @size: Size of the object
1395 * @flags: Current flags for the object
1396 * @id - GPU command object ID
1397 */
1398struct kgsl_command_object {
1399 uint64_t offset;
1400 uint64_t gpuaddr;
1401 uint64_t size;
1402 unsigned int flags;
1403 unsigned int id;
1404};
1405
1406/**
1407 * struct kgsl_command_syncpoint - GPU syncpoint object
1408 * @priv: Pointer to the type specific buffer
1409 * @size: Size of the type specific buffer
1410 * @type: type of sync point defined here
1411 */
1412struct kgsl_command_syncpoint {
1413 uint64_t __user priv;
1414 uint64_t size;
1415 unsigned int type;
1416};
1417
1418/**
1419 * struct kgsl_command_object - Argument for IOCTL_KGSL_GPU_COMMAND
1420 * @flags: Current flags for the object
1421 * @cmdlist: List of kgsl_command_objects for submission
1422 * @cmd_size: Size of kgsl_command_objects structure
1423 * @numcmds: Number of kgsl_command_objects in command list
1424 * @objlist: List of kgsl_command_objects for tracking
1425 * @obj_size: Size of kgsl_command_objects structure
1426 * @numobjs: Number of kgsl_command_objects in object list
1427 * @synclist: List of kgsl_command_syncpoints
1428 * @sync_size: Size of kgsl_command_syncpoint structure
1429 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1430 * @context_id: Context ID submittin ghte kgsl_gpu_command
1431 * @timestamp: Timestamp for the submitted commands
1432 */
1433struct kgsl_gpu_command {
1434 uint64_t flags;
1435 uint64_t __user cmdlist;
1436 unsigned int cmdsize;
1437 unsigned int numcmds;
1438 uint64_t __user objlist;
1439 unsigned int objsize;
1440 unsigned int numobjs;
1441 uint64_t __user synclist;
1442 unsigned int syncsize;
1443 unsigned int numsyncs;
1444 unsigned int context_id;
1445 unsigned int timestamp;
1446};
1447
1448#define IOCTL_KGSL_GPU_COMMAND \
1449 _IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1450
1451/**
1452 * struct kgsl_preemption_counters_query - argument to
1453 * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
1454 * @counters: Return preemption counters array
1455 * @size_user: Size allocated by userspace
1456 * @size_priority_level: Size of preemption counters for each
1457 * priority level
1458 * @max_priority_level: Return max number of priority levels
1459 *
1460 * Query the available preemption counters. The array counters
1461 * is used to return preemption counters. The size of the array
1462 * is passed in so the kernel will only write at most size_user
1463 * or max available preemption counters. The total number of
1464 * preemption counters is returned in max_priority_level. If the
1465 * array or size passed in are invalid, then an error is
1466 * returned back.
1467 */
1468struct kgsl_preemption_counters_query {
1469 uint64_t __user counters;
1470 unsigned int size_user;
1471 unsigned int size_priority_level;
1472 unsigned int max_priority_level;
1473};
1474
1475#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
1476 _IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1477
1478/**
1479 * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
1480 * @flags: Flags to indicate which paramaters to change
1481 * @metadata: If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
1482 * metadata
1483 * @id: GPU memory object ID to change
1484 * @metadata_len: If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
1485 * new metadata string
1486 * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
1487 */
1488
1489#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
1490#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
1491
1492struct kgsl_gpuobj_set_info {
1493 uint64_t flags;
1494 uint64_t metadata;
1495 unsigned int id;
1496 unsigned int metadata_len;
1497 unsigned int type;
1498};
1499
1500#define IOCTL_KGSL_GPUOBJ_SET_INFO \
1501 _IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1502
1503/**
1504 * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
1505 * @size: Size in bytes to back
1506 * @pagesize: Pagesize alignment required
1507 * @flags: Flags for this allocation
1508 * @id: Returned ID for this allocation
1509 */
1510struct kgsl_sparse_phys_alloc {
1511 uint64_t size;
1512 uint64_t pagesize;
1513 uint64_t flags;
1514 unsigned int id;
1515};
1516
1517#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
1518 _IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
1519
1520/**
1521 * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
1522 * @id: ID to free
1523 */
1524struct kgsl_sparse_phys_free {
1525 unsigned int id;
1526};
1527
1528#define IOCTL_KGSL_SPARSE_PHYS_FREE \
1529 _IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
1530
1531/**
1532 * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
1533 * @size: Size in bytes to reserve
1534 * @pagesize: Pagesize alignment required
1535 * @flags: Flags for this allocation
1536 * @id: Returned ID for this allocation
1537 * @gpuaddr: Returned GPU address for this allocation
1538 */
1539struct kgsl_sparse_virt_alloc {
1540 uint64_t size;
1541 uint64_t pagesize;
1542 uint64_t flags;
1543 uint64_t gpuaddr;
1544 unsigned int id;
1545};
1546
1547#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
1548 _IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
1549
1550/**
1551 * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
1552 * @id: ID to free
1553 */
1554struct kgsl_sparse_virt_free {
1555 unsigned int id;
1556};
1557
1558#define IOCTL_KGSL_SPARSE_VIRT_FREE \
1559 _IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1560
1561/**
1562 * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
1563 * @virtoffset: Offset into the virtual ID
1564 * @physoffset: Offset into the physical ID (bind only)
1565 * @size: Size in bytes to reserve
1566 * @flags: Flags for this kgsl_sparse_binding_object
1567 * @id: Physical ID to bind (bind only)
1568 */
1569struct kgsl_sparse_binding_object {
1570 uint64_t virtoffset;
1571 uint64_t physoffset;
1572 uint64_t size;
1573 uint64_t flags;
1574 unsigned int id;
1575};
1576
1577/**
1578 * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
1579 * @list: List of kgsl_sparse_bind_objects to bind/unbind
1580 * @id: Virtual ID to bind/unbind
1581 * @size: Size of kgsl_sparse_bind_object
1582 * @count: Number of elements in list
1583 *
1584 */
1585struct kgsl_sparse_bind {
1586 uint64_t __user list;
1587 unsigned int id;
1588 unsigned int size;
1589 unsigned int count;
1590};
1591
1592#define IOCTL_KGSL_SPARSE_BIND \
1593 _IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
1594
Tarun Karra2b8b3632016-11-14 16:38:27 -08001595/**
1596 * struct kgsl_gpu_sparse_command - Argument for
1597 * IOCTL_KGSL_GPU_SPARSE_COMMAND
1598 * @flags: Current flags for the object
1599 * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
1600 * @synclist: List of kgsl_command_syncpoints
1601 * @sparsesize: Size of kgsl_sparse_binding_object
1602 * @numsparse: Number of elements in list
1603 * @sync_size: Size of kgsl_command_syncpoint structure
1604 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1605 * @context_id: Context ID submitting the kgsl_gpu_command
1606 * @timestamp: Timestamp for the submitted commands
1607 * @id: Virtual ID to bind/unbind
1608 */
1609struct kgsl_gpu_sparse_command {
1610 uint64_t flags;
1611 uint64_t __user sparselist;
1612 uint64_t __user synclist;
1613 unsigned int sparsesize;
1614 unsigned int numsparse;
1615 unsigned int syncsize;
1616 unsigned int numsyncs;
1617 unsigned int context_id;
1618 unsigned int timestamp;
1619 unsigned int id;
1620};
1621
1622#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
1623 _IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1624
Shrenuj Bansala419c792016-10-20 14:05:11 -07001625#endif /* _UAPI_MSM_KGSL_H */