/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _VC4_DRM_H_
#define _VC4_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VC4_SUBMIT_CL                 0x00
#define DRM_VC4_WAIT_SEQNO                0x01
#define DRM_VC4_WAIT_BO                   0x02
#define DRM_VC4_CREATE_BO                 0x03
#define DRM_VC4_MMAP_BO                   0x04
#define DRM_VC4_CREATE_SHADER_BO          0x05
#define DRM_VC4_GET_HANG_STATE            0x06
#define DRM_VC4_GET_PARAM                 0x07
#define DRM_VC4_SET_TILING                0x08
#define DRM_VC4_GET_TILING                0x09
#define DRM_VC4_LABEL_BO                  0x0a
#define DRM_VC4_GEM_MADVISE               0x0b
#define DRM_VC4_PERFMON_CREATE            0x0c
#define DRM_VC4_PERFMON_DESTROY           0x0d
#define DRM_VC4_PERFMON_GET_VALUES        0x0e

#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
#define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO            DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE         DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
#define DRM_IOCTL_VC4_PERFMON_CREATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
#define DRM_IOCTL_VC4_PERFMON_DESTROY     DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
#define DRM_IOCTL_VC4_PERFMON_GET_VALUES  DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)

struct drm_vc4_submit_rcl_surface {
	__u32 hindex; /* Handle index, or ~0 if not present. */
	__u32 offset; /* Offset to start of buffer. */
	/*
	 * Bits for either render config (color_write) or load/store packet.
	 * Bits should all be 0 for MSAA load/stores.
	 */
	__u16 bits;

#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES		(1 << 0)
	__u16 flags;
};

/**
 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * Drivers typically use GPU BOs to store batchbuffers / command lists and
 * their associated state. However, because the VC4 lacks an MMU, we have to
 * do validation of memory accesses by the GPU commands. If we were to store
 * our commands in BOs, we'd need to do uncached readback from them to do the
 * validation process, which is too expensive. Instead, userspace accumulates
 * commands and associated state in plain memory, then the kernel copies the
 * data to its own address space, and then validates and stores it in a GPU
 * BO.
 */
struct drm_vc4_submit_cl {
	/* Pointer to the binner command list.
	 *
	 * This is the first set of commands executed, which runs the
	 * coordinate shader to determine where primitives land on the screen,
	 * then writes out the state updates and draw calls necessary per tile
	 * to the tile allocation BO.
	 */
	__u64 bin_cl;

	/* Pointer to the shader records.
	 *
	 * Shader records are the structures read by the hardware that contain
	 * pointers to uniforms, shaders, and vertex attributes. The
	 * reference to the shader record has enough information to determine
	 * how many pointers are necessary (fixed number for shaders/uniforms,
	 * and an attribute count), so those BO indices into bo_handles are
	 * just stored as __u32s before each shader record passed in.
	 */
	__u64 shader_rec;

	/* Pointer to uniform data and texture handles for the textures
	 * referenced by the shader.
	 *
	 * For each shader state record, there is a set of uniform data in the
	 * order referenced by the record (FS, VS, then CS). Each set of
	 * uniform data has a __u32 index into bo_handles per texture
	 * sample operation, in the order the QPU_W_TMUn_S writes appear in
	 * the program. Following the texture BO handle indices is the actual
	 * uniform data.
	 *
	 * The individual uniform state blocks don't have sizes passed in,
	 * because the kernel has to determine the sizes anyway during shader
	 * code validation.
	 */
	__u64 uniforms;
	__u64 bo_handles;

	/* Size in bytes of the binner command list. */
	__u32 bin_cl_size;
	/* Size in bytes of the set of shader records. */
	__u32 shader_rec_size;
	/* Number of shader records.
	 *
	 * This could just be computed from the contents of shader_records and
	 * the address bits of references to them from the bin CL, but it
	 * keeps the kernel from having to resize some allocations it makes.
	 */
	__u32 shader_rec_count;
	/* Size in bytes of the uniform state. */
	__u32 uniforms_size;

	/* Number of BO handles passed in (size is that times 4). */
	__u32 bo_handle_count;

	/* RCL setup: */
	__u16 width;
	__u16 height;
	__u8 min_x_tile;
	__u8 min_y_tile;
	__u8 max_x_tile;
	__u8 max_y_tile;
	struct drm_vc4_submit_rcl_surface color_read;
	struct drm_vc4_submit_rcl_surface color_write;
	struct drm_vc4_submit_rcl_surface zs_read;
	struct drm_vc4_submit_rcl_surface zs_write;
	struct drm_vc4_submit_rcl_surface msaa_color_write;
	struct drm_vc4_submit_rcl_surface msaa_zs_write;
	__u32 clear_color[2];
	__u32 clear_z;
	__u8 clear_s;

	__u32 pad:24;

#define VC4_SUBMIT_CL_USE_CLEAR_COLOR			(1 << 0)
/* By default, the kernel gets to choose the order that the tiles are
 * rendered in. If this is set, then the tiles will be rendered in a
 * raster order, with the right-to-left vs left-to-right and
 * top-to-bottom vs bottom-to-top dictated by
 * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
 * blits to be implemented using the 3D engine.
 */
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER			(1 << 1)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X		(1 << 2)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y		(1 << 3)
	__u32 flags;

	/* Returned value of the seqno of this render job (for the
	 * wait ioctl).
	 */
	__u64 seqno;

	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
	__u32 perfmonid;

	/* Unused field to align this struct on 64 bits. Must be set to 0.
	 * If one ever needs to add a u32 field to this struct, this field
	 * can be used.
	 */
	__u32 pad2;
};
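
/* A minimal sketch of a DRM_VC4_SUBMIT_CL call, for illustration only: the
 * binner CL, shader records, and uniform stream pointed to here must be
 * built according to the VC4 hardware packet formats, which are beyond the
 * scope of this header. `fd` is an open vc4 DRM fd; `bin_cl_buf`,
 * `shader_recs`, `uniforms_buf`, and `handles` are hypothetical userspace
 * buffers; drmIoctl() is libdrm's EINTR-restarting ioctl wrapper. Tiles
 * are assumed to be 64x64 pixels (non-MSAA).
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl_buf,
 *		.bin_cl_size = bin_cl_size,
 *		.shader_rec = (uintptr_t)shader_recs,
 *		.shader_rec_size = shader_rec_size,
 *		.shader_rec_count = shader_rec_count,
 *		.uniforms = (uintptr_t)uniforms_buf,
 *		.uniforms_size = uniforms_size,
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = handle_count,
 *		.width = 1920,
 *		.height = 1080,
 *		.max_x_tile = (1920 + 63) / 64 - 1,
 *		.max_y_tile = (1080 + 63) / 64 - 1,
 *		.color_write = { .hindex = 0, .bits = rcfg_bits },
 *		.color_read = { .hindex = ~0 },	// surface not present
 *		.zs_read = { .hindex = ~0 },
 *		.zs_write = { .hindex = ~0 },
 *		.msaa_color_write = { .hindex = ~0 },
 *		.msaa_zs_write = { .hindex = ~0 },
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) == 0)
 *		last_seqno = submit.seqno;	// input to the wait ioctls below
 */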

/**
 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
 *
 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
 * block, just return the status."
 */
struct drm_vc4_wait_seqno {
	__u64 seqno;
	__u64 timeout_ns;
};
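
/* Example (sketch): block for up to one second on the job submitted above.
 * `fd` and `submit` are assumed from the previous example; errno is
 * expected to be ETIME when the timeout expires before completion.
 *
 *	struct drm_vc4_wait_seqno wait = {
 *		.seqno = submit.seqno,
 *		.timeout_ns = 1000000000ull,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 */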

/**
 * struct drm_vc4_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_vc4_wait_bo {
	__u32 handle;
	__u32 pad;
	__u64 timeout_ns;
};
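
/* Example (sketch): wait until every submitted job rendering to a shared BO
 * has finished, e.g. before a compositor samples from it. Passing a very
 * large timeout_ns effectively means "wait until idle"; `handle` is a GEM
 * handle obtained elsewhere.
 *
 *	struct drm_vc4_wait_bo wait = {
 *		.handle = handle,
 *		.timeout_ns = ~0ull,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
 */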

/**
 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_create_bo {
	__u32 size;
	__u32 flags;
	/** Returned GEM handle for the BO. */
	__u32 handle;
	__u32 pad;
};
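
/* Example (sketch): allocate a page-sized BO. flags has no defined values
 * yet and must be 0; `bo_handle` is a hypothetical variable.
 *
 *	struct drm_vc4_create_bo create = {
 *		.size = 4096,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create) == 0)
 *		bo_handle = create.handle;
 */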

/**
 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
 *
 * This doesn't actually perform an mmap. Instead, it returns the
 * offset you need to use in an mmap on the DRM device node. This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_mmap_bo {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 flags;
	/** offset into the drm node to use for subsequent mmap call. */
	__u64 offset;
};
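
/* Example (sketch): the two-step mapping flow implied above. The ioctl only
 * produces a fake offset; the actual mapping is an ordinary mmap() on the
 * DRM fd (requires <sys/mman.h>). `bo_handle` and `bo_size` come from a
 * prior DRM_IOCTL_VC4_CREATE_BO.
 *
 *	struct drm_vc4_mmap_bo map = {
 *		.handle = bo_handle,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */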

/**
 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
 * shader BOs.
 *
 * Since allowing a shader to be overwritten while it's also being
 * executed from would allow privilege escalation, shaders must be
 * created using this ioctl, and they can't be mmapped later.
 */
struct drm_vc4_create_shader_bo {
	/* Size of the data argument. */
	__u32 size;
	/* Flags, currently must be 0. */
	__u32 flags;

	/* Pointer to the data. */
	__u64 data;

	/** Returned GEM handle for the BO. */
	__u32 handle;
	/* Pad, must be 0. */
	__u32 pad;
};
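
/* Example (sketch): upload a compiled fragment shader. `qpu_insts` would be
 * an array of 64-bit QPU instructions produced by a shader compiler; the
 * kernel validates the program before accepting it, and the resulting BO
 * can never be mapped or rewritten.
 *
 *	struct drm_vc4_create_shader_bo shader = {
 *		.size = sizeof(qpu_insts),
 *		.data = (uintptr_t)qpu_insts,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &shader);
 */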

struct drm_vc4_get_hang_state_bo {
	__u32 handle;
	__u32 paddr;
	__u32 size;
	__u32 pad;
};

/**
 * struct drm_vc4_get_hang_state - ioctl argument for collecting state
 * from a GPU hang for analysis.
 */
struct drm_vc4_get_hang_state {
	/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
	__u64 bo;
	/**
	 * On input, the size of the bo array. Output is the number
	 * of bos to be returned.
	 */
	__u32 bo_count;

	__u32 start_bin, start_render;

	__u32 ct0ca, ct0ea;
	__u32 ct1ca, ct1ea;
	__u32 ct0cs, ct1cs;
	__u32 ct0ra0, ct1ra0;

	__u32 bpca, bpcs;
	__u32 bpoa, bpos;

	__u32 vpmbase;

	__u32 dbge;
	__u32 fdbgo;
	__u32 fdbgb;
	__u32 fdbgr;
	__u32 fdbgs;
	__u32 errstat;

	/* Pad that we may save more registers into in the future. */
	__u32 pad[16];
};
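
/* Example (sketch): the two-call pattern for this variable-length query.
 * The first call with bo_count == 0 reports how many BOs were involved in
 * the hang; the second call fills a user-allocated array (assumes
 * <stdlib.h>).
 *
 *	struct drm_vc4_get_hang_state state = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &state);
 *
 *	struct drm_vc4_get_hang_state_bo *bos =
 *		calloc(state.bo_count, sizeof(*bos));
 *	state.bo = (uintptr_t)bos;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &state);
 */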

#define DRM_VC4_PARAM_V3D_IDENT0		0
#define DRM_VC4_PARAM_V3D_IDENT1		1
#define DRM_VC4_PARAM_V3D_IDENT2		2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES		3
#define DRM_VC4_PARAM_SUPPORTS_ETC1		4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS	5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER	6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE		7
#define DRM_VC4_PARAM_SUPPORTS_PERFMON		8

struct drm_vc4_get_param {
	__u32 param;
	__u32 pad;
	__u64 value;
};
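
/* Example (sketch): feature detection before relying on an optional
 * interface, here threaded fragment shaders (assumes <stdbool.h>).
 *
 *	struct drm_vc4_get_param p = {
 *		.param = DRM_VC4_PARAM_SUPPORTS_THREADED_FS,
 *	};
 *	bool has_threaded_fs =
 *		drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p) == 0 && p.value;
 */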

struct drm_vc4_get_tiling {
	__u32 handle;
	__u32 flags;
	__u64 modifier;
};

struct drm_vc4_set_tiling {
	__u32 handle;
	__u32 flags;
	__u64 modifier;
};
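
/* Example (sketch): switch a BO to the hardware's T-format tiling. The
 * modifier values are the standard ones from drm_fourcc.h
 * (DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED); flags has
 * no defined values yet and must be 0.
 *
 *	struct drm_vc4_set_tiling tiling = {
 *		.handle = bo_handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &tiling);
 */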

/**
 * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
 */
struct drm_vc4_label_bo {
	__u32 handle;
	__u32 len;
	__u64 name;
};
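
/* Example (sketch): label a BO so it can be identified in the kernel's BO
 * usage accounting (requires <string.h>).
 *
 *	static const char bo_name[] = "scanout buffer";
 *	struct drm_vc4_label_bo label = {
 *		.handle = bo_handle,
 *		.len = strlen(bo_name),
 *		.name = (uintptr_t)bo_name,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
 */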

/*
 * States prefixed with '__' are internal states and cannot be passed to the
 * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
 */
#define VC4_MADV_WILLNEED			0
#define VC4_MADV_DONTNEED			1
#define __VC4_MADV_PURGED			2
#define __VC4_MADV_NOTSUPP			3

struct drm_vc4_gem_madvise {
	__u32 handle;
	__u32 madv;
	__u32 retained;
	__u32 pad;
};
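
/* Example (sketch): a userspace BO cache marking an idle BO purgeable and
 * later reclaiming it. If the kernel purged the BO in the meantime,
 * `retained` comes back 0 and the cached contents must be regenerated.
 *
 *	struct drm_vc4_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = VC4_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *
 *	// ... later, when the BO is needed again ...
 *	madv.madv = VC4_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *	if (!madv.retained) {
 *		// backing pages were purged; reallocate or repaint
 *	}
 */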

enum {
	VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
	VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
	VC4_PERFCNT_FEP_CLIPPED_QUADS,
	VC4_PERFCNT_FEP_VALID_QUADS,
	VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
	VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
	VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
	VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
	VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
	VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
	VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
	VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
	VC4_PERFCNT_PSE_PRIMS_REVERSED,
	VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
	VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
	VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
	VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
	VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
	VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
	VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
	VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
	VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
	VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
	VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
	VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
	VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
	VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
	VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
	VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
	VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
	VC4_PERFCNT_NUM_EVENTS,
};

#define DRM_VC4_MAX_PERF_COUNTERS	16

struct drm_vc4_perfmon_create {
	__u32 id;
	__u32 ncounters;
	__u8 events[DRM_VC4_MAX_PERF_COUNTERS];
};

struct drm_vc4_perfmon_destroy {
	__u32 id;
};

/*
 * Returns the values of the performance counters tracked by this
 * perfmon (as an array of ncounters u64 values).
 *
 * No implicit synchronization is performed, so the user has to
 * guarantee that any jobs using this perfmon have already been
 * completed (probably by blocking on the seqno returned by the
 * last exec that used the perfmon).
 */
struct drm_vc4_perfmon_get_values {
	__u32 id;
	__u64 values_ptr;
};
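
/* Example (sketch): the full perfmon lifecycle. Create a monitor for two
 * events, attach it to a job through drm_vc4_submit_cl.perfmonid, wait for
 * the job to complete, then read back and destroy the monitor.
 *
 *	struct drm_vc4_perfmon_create create = {
 *		.ncounters = 2,
 *		.events = {
 *			VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
 *			VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
 *		},
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
 *
 *	submit.perfmonid = create.id;
 *	// ... submit, then wait on submit.seqno ...
 *
 *	__u64 values[2];
 *	struct drm_vc4_perfmon_get_values get = {
 *		.id = create.id,
 *		.values_ptr = (uintptr_t)values,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
 *
 *	struct drm_vc4_perfmon_destroy destroy = { .id = create.id };
 *	drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
 */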

#if defined(__cplusplus)
}
#endif

#endif /* _VC4_DRM_H_ */