/* blob: f05155b6fb580450bf6c32c7108fb4ac119c09ef (source-viewer artifact) */
Shrenuj Bansala419c792016-10-20 14:05:11 -07001#ifndef _UAPI_MSM_KGSL_H
2#define _UAPI_MSM_KGSL_H
3
4#include <linux/types.h>
5#include <linux/ioctl.h>
6
/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */

#define KGSL_VERSION_MAJOR        3
#define KGSL_VERSION_MINOR        14
16
/*
 * We have traditionally mixed context and issueibcmds / command batch flags
 * together into a big flag stew. This worked fine until we started adding a
 * lot more command batch flags and we started running out of bits. Turns out
 * we have a bit of room in the context type / priority mask that we could use
 * for command batches, but that means we need to split out the flags into two
 * coherent sets.
 *
 * If any future definitions are for both context and cmdbatch add both defines
 * and link the cmdbatch to the context define as we do below. Otherwise feel
 * free to add exclusive bits to either set.
 */

/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE	0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC		0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT	0x00000800
/* Context priority lives in bits [12:15] of the context flags word */
#define KGSL_CONTEXT_PRIORITY_MASK	0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT	12
#define KGSL_CONTEXT_PRIORITY_UNDEF	0

#define KGSL_CONTEXT_IFH_NOP		0x00010000
#define KGSL_CONTEXT_SECURE		0x00020000
#define KGSL_CONTEXT_NO_SNAPSHOT	0x00040000
#define KGSL_CONTEXT_SPARSE		0x00080000

/* Preemption style selector lives in bits [25:27] */
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK		0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT	25
#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT	0x0
#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER	0x1
#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN	0x2

/* Context type hint lives in bits [20:24] */
#define KGSL_CONTEXT_TYPE_MASK		0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT		20
#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000

/* Sentinel context id - never a valid context */
#define KGSL_CONTEXT_INVALID 0xffffffff
73
/*
 * --- command batch flags ---
 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches. To be
 * safe the other 8 bits that are still available in the context field should
 * be omitted here in case we need to share - the other bits are available for
 * cmdbatch only flags as needed
 */
#define KGSL_CMDBATCH_MEMLIST		0x00000001
#define KGSL_CMDBATCH_MARKER		0x00000002
#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
#define KGSL_CMDBATCH_PROFILING		0x00000010
/*
 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
 * to take effect, as the latter only affects the time data returned.
 */
#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
#define KGSL_CMDBATCH_SPARSE		0x1000 /* 0x1000 */

/*
 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
 * contexts and command batches. Update this comment as new flags are added.
 */
101
/*
 * gpu_command_object flags - these flags communicate the type of command or
 * memory object being submitted for a GPU command
 */

/* Flags for GPU command objects */
#define KGSL_CMDLIST_IB			0x00000001U
#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
#define KGSL_CMDLIST_IB_PREAMBLE	0x00000004U

/* Flags for GPU command memory objects */
#define KGSL_OBJLIST_MEMOBJ		0x00000008U
#define KGSL_OBJLIST_PROFILE		0x00000010U

/* Flags for GPU command sync points */
#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1

/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_SECURE		0x00000008ULL
#define KGSL_MEMFLAGS_GPUREADONLY	0x01000000U
#define KGSL_MEMFLAGS_GPUWRITEONLY	0x02000000U
#define KGSL_MEMFLAGS_FORCE_32BIT	0x100000000ULL

/* Flag for binding all the virt range to single phys data */
#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
#define KGSL_SPARSE_BIND		0x1ULL
#define KGSL_SPARSE_UNBIND		0x2ULL

/* Memory caching hints - cache mode lives in bits [26:27] */
#define KGSL_CACHEMODE_MASK		0x0C000000U
#define KGSL_CACHEMODE_SHIFT		26

#define KGSL_CACHEMODE_WRITECOMBINE	0
#define KGSL_CACHEMODE_UNCACHED		1
#define KGSL_CACHEMODE_WRITETHROUGH	2
#define KGSL_CACHEMODE_WRITEBACK	3

#define KGSL_MEMFLAGS_USE_CPU_MAP	0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS	0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT	0x40000000ULL
145
/* Memory types for which allocations are made */
/* Memory usage hint lives in bits [8:15] of the allocation flags */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16
179
/* Source of user-supplied memory mapped into the GPU (see kgsl_map_user_mem) */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF	= 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
/* User-memory source encoding lives in bits [5:7] of the flags word */
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
209
/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/* UBWC Modes */
#define KGSL_UBWC_NONE	0
#define KGSL_UBWC_1_0	1
#define KGSL_UBWC_2_0	2
#define KGSL_UBWC_3_0	3
232
/*
 * Reset status values for context
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};
242
/*
 * Scale @val by 1,000,000 (presumably a megabytes-per-second value scaled
 * to bytes per second - the name suggests the reverse; confirm with callers).
 * The argument is parenthesized so expression arguments such as
 * KGSL_CONVERT_TO_MBPS(a + b) expand correctly; the unparenthesized form
 * would only multiply the last operand.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
245
/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_MAX
};

/* Device information, returned for KGSL_PROP_DEVICE_INFO queries */
struct kgsl_devinfo {

	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	size_t gmem_sizebytes;
};
269
/*
 * struct kgsl_devmemstore - this structure defines the region of memory
 * that can be mmap()ed from this driver. The timestamp fields are volatile
 * because they are written by the GPU
 * @soptimestamp: Start of pipeline timestamp written by GPU before the
 * commands in concern are processed
 * @sbz: Unused, kept for 8 byte alignment
 * @eoptimestamp: End of pipeline timestamp written by GPU after the
 * commands in concern are processed
 * @sbz2: Unused, kept for 8 byte alignment
 * @preempted: Indicates if the context was preempted
 * @sbz3: Unused, kept for 8 byte alignment
 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
 * @sbz4: Unused, kept for 8 byte alignment
 * @current_context: The current context the GPU is working on
 * @sbz5: Unused, kept for 8 byte alignment
 */
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int preempted;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* Byte offset of @field within the memstore slot belonging to @ctxt_id */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))
303
/* timestamp id */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
#define KGSL_PROP_DEVICE_INFO		0x1
#define KGSL_PROP_DEVICE_SHADOW		0x2
#define KGSL_PROP_DEVICE_POWER		0x3
#define KGSL_PROP_SHMEM			0x4
#define KGSL_PROP_SHMEM_APERTURES	0x5
#define KGSL_PROP_MMU_ENABLE		0x6
#define KGSL_PROP_INTERRUPT_WAITS	0x7
#define KGSL_PROP_VERSION		0x8
#define KGSL_PROP_GPU_RESET_STAT	0x9
#define KGSL_PROP_PWRCTRL		0xE
#define KGSL_PROP_PWR_CONSTRAINT	0x12
#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
#define KGSL_PROP_SP_GENERIC_MEM	0x14
#define KGSL_PROP_UCODE_VERSION		0x15
#define KGSL_PROP_GPMU_VERSION		0x16
#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
#define KGSL_PROP_DEVICE_BITNESS	0x18
#define KGSL_PROP_DEVICE_QDSS_STM	0x19
#define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
#define KGSL_PROP_UBWC_MODE		0x1B
#define KGSL_PROP_DEVICE_QTIMER		0x20
Shrenuj Bansala419c792016-10-20 14:05:11 -0700333
/* Returned for KGSL_PROP_DEVICE_SHADOW; describes the mmap()able memstore */
struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* Returned for KGSL_PROP_DEVICE_QDSS_STM */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_DEVICE_QTIMER */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_VERSION; driver (drv_*) and device (dev_*) versions */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Returned for KGSL_PROP_SP_GENERIC_MEM */
struct kgsl_sp_generic_mem {
	uint64_t local;
	uint64_t pvt;
};

/* Returned for KGSL_PROP_UCODE_VERSION; PFP and PM4 microcode versions */
struct kgsl_ucode_version {
	unsigned int pfp;
	unsigned int pm4;
};

/* Returned for KGSL_PROP_GPMU_VERSION */
struct kgsl_gpmu_version {
	unsigned int major;
	unsigned int minor;
	unsigned int features;
};
372
/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP	0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM	0x1
#define KGSL_PERFCOUNTER_GROUP_PC	0x2
#define KGSL_PERFCOUNTER_GROUP_VFD	0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ	0x4
#define KGSL_PERFCOUNTER_GROUP_VPC	0x5
#define KGSL_PERFCOUNTER_GROUP_TSE	0x6
#define KGSL_PERFCOUNTER_GROUP_RAS	0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE	0x8
#define KGSL_PERFCOUNTER_GROUP_TP	0x9
#define KGSL_PERFCOUNTER_GROUP_SP	0xA
#define KGSL_PERFCOUNTER_GROUP_RB	0xB
#define KGSL_PERFCOUNTER_GROUP_PWR	0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF	0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR	0xE
#define KGSL_PERFCOUNTER_GROUP_MH	0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU	0x10
#define KGSL_PERFCOUNTER_GROUP_SQ	0x11
#define KGSL_PERFCOUNTER_GROUP_SX	0x12
#define KGSL_PERFCOUNTER_GROUP_TCF	0x13
#define KGSL_PERFCOUNTER_GROUP_TCM	0x14
#define KGSL_PERFCOUNTER_GROUP_TCR	0x15
#define KGSL_PERFCOUNTER_GROUP_L2	0x16
#define KGSL_PERFCOUNTER_GROUP_VSC	0x17
#define KGSL_PERFCOUNTER_GROUP_CCU	0x18
#define KGSL_PERFCOUNTER_GROUP_LRZ	0x19
#define KGSL_PERFCOUNTER_GROUP_CMP	0x1A
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON	0x1B
#define KGSL_PERFCOUNTER_GROUP_SP_PWR	0x1C
#define KGSL_PERFCOUNTER_GROUP_TP_PWR	0x1D
#define KGSL_PERFCOUNTER_GROUP_RB_PWR	0x1E
#define KGSL_PERFCOUNTER_GROUP_CCU_PWR	0x1F
#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR	0x20
#define KGSL_PERFCOUNTER_GROUP_CP_PWR	0x21
#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR	0x22
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
#define KGSL_PERFCOUNTER_GROUP_MAX	0x24

/* Sentinel counter-offset values returned by the perfcounter ioctls */
#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
415
/* structure holds list of ibs (indirect buffers submitted for execution) */
struct kgsl_ibdesc {
	unsigned long gpuaddr;
	unsigned long __pad;	/* reserved; keeps 64-bit layout stable */
	size_t sizedwords;
	unsigned int ctrl;
};

/**
 * struct kgsl_cmdbatch_profiling_buffer
 * @wall_clock_s: Ringbuffer submission time (seconds).
 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
 *                in kernel clocks, otherwise wall clock time is used.
 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
 *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
 *                 in kernel clocks, otherwise wall clock time is used.
 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
 *
 * This structure defines the profiling buffer used to measure cmdbatch
 * execution time
 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;
	uint64_t wall_clock_ns;
	uint64_t gpu_ticks_queued;
	uint64_t gpu_ticks_submitted;
	uint64_t gpu_ticks_retired;
};
446
/* ioctls */
/* ioctl 'magic' number shared by every KGSL ioctl definition below */
#define KGSL_IOC_TYPE 0x09

/*
 * get misc info about the GPU
 * type should be a value from enum kgsl_property_type
 * value points to a structure that varies based on type
 * sizebytes is sizeof() that structure
 * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
 * this structure contains hardware versioning info.
 * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
 * this is used to find mmap() offset and sizes for mapping
 * struct kgsl_memstore into userspace.
 */
struct kgsl_device_getproperty {
	unsigned int type;
	void __user *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* Per-context variant of IOCTL_KGSL_DEVICE_WAITTIMESTAMP */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
492
/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr;
	unsigned int numibs;
	unsigned int timestamp; /* output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /* output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
529
/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/*
 * Previous versions of this header had incorrectly defined
 * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
 * of a write only ioctl. To ensure binary compatibility, the following
 * #define will be used to intercept the incorrect ioctl
 */

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_* values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /* output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
572
/*
 * add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space
 */
struct kgsl_map_user_mem {
	int fd;
	unsigned long gpuaddr; /* output param */
	size_t len;
	size_t offset;
	unsigned long hostptr; /* input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

/* Per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;
	unsigned int timestamp; /* output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

/* Per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)

/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr; /* output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
628
/* User-generated CFF (common file format trace) event */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];	/* reserved for future binary compatibility */
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

/* GMEM region descriptor used with IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* Shadow buffer descriptor used with IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_buffer_desc {
	void *hostptr;
	unsigned long gpuaddr;
	int size;
	unsigned int format;
	unsigned int pitch;
	unsigned int enabled;
};

struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
670
/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr; /* output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation. It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

/* Set the bin base offset for a draw context */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)

enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN	= 0x00000000,
	KGSL_CMDWINDOW_2D	= 0x00000000,
	KGSL_CMDWINDOW_3D	= 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU	= 0x00000002,
	KGSL_CMDWINDOW_ARBITER	= 0x000000FF,
	KGSL_CMDWINDOW_MAX	= 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
724
/* Allocate GPU-accessible memory; see also IOCTL_KGSL_GPUMEM_ALLOC_ID */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* CFF trace sync for a GPU memory range */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)

/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void __user *priv;       /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel. Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
786
/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use */
	unsigned long __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
835
/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long useraddr;
/* private: reserved for future use */
	unsigned long __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO \
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
864
/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 * @offset: offset into the buffer
 * @length: number of bytes starting from offset to perform
 * the cache operation on
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
 * directions
 *
 */
struct kgsl_gpumem_sync_cache {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int op;
	size_t offset;
	size_t length;
};

#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
/*
 * Use an unsigned 1 here: left-shifting a signed int into the sign bit
 * (1 << 31) is undefined behavior in C (C11 6.5.7). The previous form put
 * the U suffix on the shift count, which does not help. The resulting
 * value (0x80000000) is unchanged.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
901
/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved LO counter
 * @offset_hi: Return offset of the reserved HI counter
 *
 * Get an available performance counter from a specified groupid. The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group. An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left. After successfully getting a perfcounter, the user
 * must call IOCTL_KGSL_PERFCOUNTER_PUT with the same (groupid, countable)
 * pair when finished with the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
	unsigned int offset_hi;
/* private: reserved for future use */
	unsigned int __pad; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
929
/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken. This is only to be called after
 * successfully getting a performance counter via IOCTL_KGSL_PERFCOUNTER_GET.
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
949
/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Size of the @countables array (in elements)
 * @max_counters: Return total number of counters for the group ID
 *
 * Query the available performance counters given a groupid. The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * @count entries for the group id. The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to count counters */
	unsigned int __user *countables;
	unsigned int count;
	unsigned int max_counters;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
979
/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: Array of kgsl_perfcounter_read_group entries; each entry supplies
 * a groupid/countable pair and receives the counter value in @value
 * @count: Number of entries in the @reads array
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 *
 */

/* One groupid/countable pair to read; value is filled in by the kernel */
struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value;
};

struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group __user *reads;
	unsigned int count;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
/*
 * struct kgsl_gpumem_sync_cache_bulk - argument to
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
 * @id_list: list of GPU buffer ids of the buffers to sync
 * @count: number of GPU buffer ids in id_list
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the cache for memory headed to and from the GPU. Certain
 * optimizations can be made on the cache operation based on the total
 * size of the working set of memory to be managed.
 */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int __user *id_list;
	unsigned int count;
	unsigned int op;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
1028
/*
 * struct kgsl_cmd_syncpoint_timestamp
 * @context_id: ID of a KGSL context
 * @timestamp: GPU timestamp
 *
 * This structure defines a syncpoint comprising a context/timestamp pair. A
 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
 * dependencies that must be met before the command can be submitted to the
 * hardware
 */
struct kgsl_cmd_syncpoint_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

/* Syncpoint on an external sync fence, identified by its file descriptor */
struct kgsl_cmd_syncpoint_fence {
	int fd;
};
1047
/**
 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
 * @type: type of sync point defined here
 * @priv: Pointer to the type specific buffer (e.g. a
 * kgsl_cmd_syncpoint_timestamp or kgsl_cmd_syncpoint_fence)
 * @size: Size of the type specific buffer
 *
 * This structure contains pointers defining a specific command sync point.
 * The pointer and size should point to a type appropriate structure.
 */
struct kgsl_cmd_syncpoint {
	int type;
	void __user *priv;
	size_t size;
};
1062
/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: Submission flags (presumably the cmdbatch flags that share the
 * context flag space - see the header comment; verify against the driver)
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware. This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way
 * to submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied. This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint __user *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1102
/**
 * struct kgsl_device_constraint - device constraint argument
 * @type: type of constraint, i.e. a KGSL_CONSTRAINT_* value
 * @context_id: KGSL context ID the constraint applies to
 * @data: constraint data (type specific, e.g.
 * struct kgsl_device_constraint_pwrlevel for KGSL_CONSTRAINT_PWRLEVEL)
 * @size: size of the constraint data
 */
struct kgsl_device_constraint {
	unsigned int type;
	unsigned int context_id;
	void __user *data;
	size_t size;
};

/* Constraint Type*/
#define KGSL_CONSTRAINT_NONE 0
#define KGSL_CONSTRAINT_PWRLEVEL 1

/* PWRLEVEL constraint level*/
/* set to min frequency */
#define KGSL_CONSTRAINT_PWR_MIN 0
/* set to max frequency */
#define KGSL_CONSTRAINT_PWR_MAX 1

/* Data for KGSL_CONSTRAINT_PWRLEVEL: one of KGSL_CONSTRAINT_PWR_* */
struct kgsl_device_constraint_pwrlevel {
	unsigned int level;
};
1130
/**
 * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
 * @id: returned id for the syncsource that was created.
 *
 * This ioctl creates a userspace sync timeline.
 */

struct kgsl_syncsource_create {
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1146
/**
 * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
 * @id: syncsource id to destroy
 *
 * This ioctl destroys a userspace sync timeline previously created by
 * IOCTL_KGSL_SYNCSOURCE_CREATE.
 */

struct kgsl_syncsource_destroy {
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1162
/**
 * struct kgsl_syncsource_create_fence - Argument to
 * IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * @id: syncsource id
 * @fence_fd: returned sync_fence fd
 *
 * Create a fence that may be signaled by userspace by calling
 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
 * these fences.
 */
struct kgsl_syncsource_create_fence {
	unsigned int id;
	int fence_fd;
/* private: reserved for future use */
	unsigned int __pad[4];
};
1179
/**
 * struct kgsl_syncsource_signal_fence - Argument to
 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
 * @id: syncsource id
 * @fence_fd: sync_fence fd to signal
 *
 * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * call using the same syncsource id. This allows a fence to be shared
 * to other processes but only signaled by the process owning the fd
 * used to create the fence.
 */
/* NOTE: this define belongs to kgsl_syncsource_create_fence above */
#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)

struct kgsl_syncsource_signal_fence {
	unsigned int id;
	int fence_fd;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1203
/**
 * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
 * @offset: Offset into the GPU object to sync
 * @length: Number of bytes to sync
 * @id: ID of the GPU object to sync
 */
struct kgsl_cff_sync_gpuobj {
	uint64_t offset;
	uint64_t length;
	unsigned int id;
};

#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1218
/**
 * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
 * @size: Size in bytes of the object to allocate
 * @flags: mask of KGSL_MEMFLAG_* bits
 * @va_len: Size in bytes of the virtual region to allocate
 * @mmapsize: Returns the mmap() size of the object
 * @id: Returns the GPU object ID of the new object
 * @metadata_len: Length of the metadata to copy from the user
 * @metadata: Pointer to the user specified metadata to store for the object
 */
struct kgsl_gpuobj_alloc {
	uint64_t size;
	uint64_t flags;
	uint64_t va_len;
	uint64_t mmapsize;
	unsigned int id;
	unsigned int metadata_len;
	uint64_t metadata;
};

/* Maximum metadata length; also lets the user know that this header
 * supports the gpuobj metadata
 */
#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64

#define IOCTL_KGSL_GPUOBJ_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1244
/**
 * struct kgsl_gpuobj_free - Argument to IOCTL_KGSL_GPUOBJ_FREE
 * @flags: Mask of: KGSL_GPUOBJ_FREE_ON_EVENT
 * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
 * specified
 * @id: ID of the GPU object to free
 * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
 * event to free on (KGSL_GPU_EVENT_*)
 * @len: Length of the data passed in priv
 */
struct kgsl_gpuobj_free {
	uint64_t flags;
	uint64_t __user priv;
	unsigned int id;
	unsigned int type;
	unsigned int len;
};

#define KGSL_GPUOBJ_FREE_ON_EVENT 1

#define KGSL_GPU_EVENT_TIMESTAMP 1
#define KGSL_GPU_EVENT_FENCE 2

/**
 * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
 * object on
 * @context_id: ID of the timestamp event to wait for
 * @timestamp: Timestamp of the timestamp event to wait for
 */
struct kgsl_gpu_event_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

/**
 * struct kgsl_gpu_event_fence - Specifies a fence ID to free a GPU object on
 * @fd: File descriptor for the fence
 */
struct kgsl_gpu_event_fence {
	int fd;
};

#define IOCTL_KGSL_GPUOBJ_FREE \
	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1289
/**
 * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
 * @gpuaddr: GPU address of the object
 * @flags: Current flags for the object
 * @size: Size of the object
 * @va_len: VA size of the object
 * @va_addr: Virtual address of the object (if it is mapped)
 * @id: GPU object ID of the object to query
 */
struct kgsl_gpuobj_info {
	uint64_t gpuaddr;
	uint64_t flags;
	uint64_t size;
	uint64_t va_len;
	uint64_t va_addr;
	unsigned int id;
};

#define IOCTL_KGSL_GPUOBJ_INFO \
	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1310
/**
 * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
 * @priv: Pointer to the private data for the import type (e.g.
 * struct kgsl_gpuobj_import_dma_buf or kgsl_gpuobj_import_useraddr)
 * @priv_len: Length of the private data
 * @flags: Mask of KGSL_MEMFLAG_ flags
 * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
 * @id: Returns the ID of the new GPU object
 */
struct kgsl_gpuobj_import {
	uint64_t __user priv;
	uint64_t priv_len;
	uint64_t flags;
	unsigned int type;
	unsigned int id;
};

/**
 * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
 * @fd: File descriptor for the dma-buf object
 */
struct kgsl_gpuobj_import_dma_buf {
	int fd;
};

/**
 * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
 * @virtaddr: Virtual address of the object to import
 */
struct kgsl_gpuobj_import_useraddr {
	uint64_t virtaddr;
};

#define IOCTL_KGSL_GPUOBJ_IMPORT \
	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1345
/**
 * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
 * @offset: Offset within the GPU object to sync
 * @length: Number of bytes to sync
 * @id: ID of the GPU object to sync
 * @op: Cache operation to execute (mask of KGSL_GPUMEM_CACHE_* values)
 */

struct kgsl_gpuobj_sync_obj {
	uint64_t offset;
	uint64_t length;
	unsigned int id;
	unsigned int op;
};

/**
 * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
 * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
 * @obj_len: Size of each item in the array
 * @count: Number of items in the array
 */

struct kgsl_gpuobj_sync {
	uint64_t __user objs;
	unsigned int obj_len;
	unsigned int count;
};

#define IOCTL_KGSL_GPUOBJ_SYNC \
	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1376
/**
 * struct kgsl_command_object - GPU command object
 * @offset: GPU address offset of the object
 * @gpuaddr: GPU address of the object
 * @size: Size of the object
 * @flags: Current flags for the object
 * @id: GPU command object ID
 */
struct kgsl_command_object {
	uint64_t offset;
	uint64_t gpuaddr;
	uint64_t size;
	unsigned int flags;
	unsigned int id;
};
1392
/**
 * struct kgsl_command_syncpoint - GPU syncpoint object
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 * @type: type of sync point defined here
 */
struct kgsl_command_syncpoint {
	uint64_t __user priv;
	uint64_t size;
	unsigned int type;
};
1404
/**
 * struct kgsl_gpu_command - Argument for IOCTL_KGSL_GPU_COMMAND
 * @flags: Current flags for the object
 * @cmdlist: List of kgsl_command_objects for submission
 * @cmdsize: Size of each kgsl_command_object structure in @cmdlist
 * @numcmds: Number of kgsl_command_objects in command list
 * @objlist: List of kgsl_command_objects for tracking
 * @objsize: Size of each kgsl_command_object structure in @objlist
 * @numobjs: Number of kgsl_command_objects in object list
 * @synclist: List of kgsl_command_syncpoints
 * @syncsize: Size of each kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 */
struct kgsl_gpu_command {
	uint64_t flags;
	uint64_t __user cmdlist;
	unsigned int cmdsize;
	unsigned int numcmds;
	uint64_t __user objlist;
	unsigned int objsize;
	unsigned int numobjs;
	uint64_t __user synclist;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
};

#define IOCTL_KGSL_GPU_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1437
/**
 * struct kgsl_preemption_counters_query - argument to
 * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
 * @counters: Return preemption counters array
 * @size_user: Size allocated by userspace
 * @size_priority_level: Size of preemption counters for each
 * priority level
 * @max_priority_level: Return max number of priority levels
 *
 * Query the available preemption counters. The array counters
 * is used to return preemption counters. The size of the array
 * is passed in so the kernel will only write at most size_user
 * or max available preemption counters. The total number of
 * preemption counters is returned in max_priority_level. If the
 * array or size passed in are invalid, then an error is
 * returned back.
 */
struct kgsl_preemption_counters_query {
	uint64_t __user counters;
	unsigned int size_user;
	unsigned int size_priority_level;
	unsigned int max_priority_level;
};

#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1464
/**
 * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
 * @flags: Flags to indicate which parameters to change
 * @metadata: If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
 * metadata
 * @id: GPU memory object ID to change
 * @metadata_len: If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
 * new metadata string
 * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
 */

#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)

struct kgsl_gpuobj_set_info {
	uint64_t flags;
	uint64_t metadata;
	unsigned int id;
	unsigned int metadata_len;
	unsigned int type;
};

#define IOCTL_KGSL_GPUOBJ_SET_INFO \
	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1489
/**
 * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
 * @size: Size in bytes to back
 * @pagesize: Pagesize alignment required
 * @flags: Flags for this allocation
 * @id: Returned ID for this allocation
 */
struct kgsl_sparse_phys_alloc {
	uint64_t size;
	uint64_t pagesize;
	uint64_t flags;
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)

/**
 * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
 * @id: ID of the physical allocation to free
 */
struct kgsl_sparse_phys_free {
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_PHYS_FREE \
	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
1517
/**
 * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
 * @size: Size in bytes to reserve
 * @pagesize: Pagesize alignment required
 * @flags: Flags for this allocation
 * @gpuaddr: Returned GPU address for this allocation
 * @id: Returned ID for this allocation
 */
struct kgsl_sparse_virt_alloc {
	uint64_t size;
	uint64_t pagesize;
	uint64_t flags;
	uint64_t gpuaddr;
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)

/**
 * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
 * @id: ID of the virtual allocation to free
 */
struct kgsl_sparse_virt_free {
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_VIRT_FREE \
	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1547
/**
 * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
 * @virtoffset: Offset into the virtual ID
 * @physoffset: Offset into the physical ID (bind only)
 * @size: Size in bytes to reserve
 * @flags: Flags for this kgsl_sparse_binding_object
 * @id: Physical ID to bind (bind only)
 */
struct kgsl_sparse_binding_object {
	uint64_t virtoffset;
	uint64_t physoffset;
	uint64_t size;
	uint64_t flags;
	unsigned int id;
};

/**
 * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
 * @list: List of kgsl_sparse_binding_objects to bind/unbind
 * @id: Virtual ID to bind/unbind
 * @size: Size of each kgsl_sparse_binding_object in @list
 * @count: Number of elements in list
 *
 */
struct kgsl_sparse_bind {
	uint64_t __user list;
	unsigned int id;
	unsigned int size;
	unsigned int count;
};

#define IOCTL_KGSL_SPARSE_BIND \
	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
1581
/**
 * struct kgsl_gpu_sparse_command - Argument for
 * IOCTL_KGSL_GPU_SPARSE_COMMAND
 * @flags: Current flags for the object
 * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
 * @synclist: List of kgsl_command_syncpoints
 * @sparsesize: Size of each kgsl_sparse_binding_object in @sparselist
 * @numsparse: Number of elements in list
 * @syncsize: Size of each kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 * @id: Virtual ID to bind/unbind
 */
struct kgsl_gpu_sparse_command {
	uint64_t flags;
	uint64_t __user sparselist;
	uint64_t __user synclist;
	unsigned int sparsesize;
	unsigned int numsparse;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int id;
};

#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1611
Shrenuj Bansala419c792016-10-20 14:05:11 -07001612#endif /* _UAPI_MSM_KGSL_H */