/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23
#ifndef _UAPI_VC4_DRM_H_
#define _UAPI_VC4_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* VC4 driver-private ioctl numbers (offsets from DRM_COMMAND_BASE). */
#define DRM_VC4_SUBMIT_CL 0x00
#define DRM_VC4_WAIT_SEQNO 0x01
#define DRM_VC4_WAIT_BO 0x02
#define DRM_VC4_CREATE_BO 0x03
#define DRM_VC4_MMAP_BO 0x04
#define DRM_VC4_CREATE_SHADER_BO 0x05
#define DRM_VC4_GET_HANG_STATE 0x06
#define DRM_VC4_GET_PARAM 0x07
#define DRM_VC4_SET_TILING 0x08
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a

/* Full ioctl request codes pairing each command number with its argument struct. */
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)

/**
 * struct drm_vc4_submit_rcl_surface - description of one render-control-list
 * surface (color/Z-stencil read or write target) within drm_vc4_submit_cl.
 */
struct drm_vc4_submit_rcl_surface {
	__u32 hindex; /* Handle index, or ~0 if not present. */
	__u32 offset; /* Offset to start of buffer. */
	/*
	 * Bits for either render config (color_write) or load/store packet.
	 * Bits should all be 0 for MSAA load/stores.
	 */
	__u16 bits;

#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
	__u16 flags;
};

/**
 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * Drivers typically use GPU BOs to store batchbuffers / command lists and
 * their associated state. However, because the VC4 lacks an MMU, we have to
 * do validation of memory accesses by the GPU commands. If we were to store
 * our commands in BOs, we'd need to do uncached readback from them to do the
 * validation process, which is too expensive. Instead, userspace accumulates
 * commands and associated state in plain memory, then the kernel copies the
 * data to its own address space, and then validates and stores it in a GPU
 * BO.
 */
struct drm_vc4_submit_cl {
	/* Pointer to the binner command list.
	 *
	 * This is the first set of commands executed, which runs the
	 * coordinate shader to determine where primitives land on the screen,
	 * then writes out the state updates and draw calls necessary per tile
	 * to the tile allocation BO.
	 */
	__u64 bin_cl;

	/* Pointer to the shader records.
	 *
	 * Shader records are the structures read by the hardware that contain
	 * pointers to uniforms, shaders, and vertex attributes. The
	 * reference to the shader record has enough information to determine
	 * how many pointers are necessary (fixed number for shaders/uniforms,
	 * and an attribute count), so those BO indices into bo_handles are
	 * just stored as __u32s before each shader record passed in.
	 */
	__u64 shader_rec;

	/* Pointer to uniform data and texture handles for the textures
	 * referenced by the shader.
	 *
	 * For each shader state record, there is a set of uniform data in the
	 * order referenced by the record (FS, VS, then CS). Each set of
	 * uniform data has a __u32 index into bo_handles per texture
	 * sample operation, in the order the QPU_W_TMUn_S writes appear in
	 * the program. Following the texture BO handle indices is the actual
	 * uniform data.
	 *
	 * The individual uniform state blocks don't have sizes passed in,
	 * because the kernel has to determine the sizes anyway during shader
	 * code validation.
	 */
	__u64 uniforms;
	/* Pointer to an array of __u32 GEM handles referenced by this job. */
	__u64 bo_handles;

	/* Size in bytes of the binner command list. */
	__u32 bin_cl_size;
	/* Size in bytes of the set of shader records. */
	__u32 shader_rec_size;
	/* Number of shader records.
	 *
	 * This could just be computed from the contents of shader_records and
	 * the address bits of references to them from the bin CL, but it
	 * keeps the kernel from having to resize some allocations it makes.
	 */
	__u32 shader_rec_count;
	/* Size in bytes of the uniform state. */
	__u32 uniforms_size;

	/* Number of BO handles passed in (size is that times 4). */
	__u32 bo_handle_count;

	/* RCL setup: framebuffer dimensions and the tile rectangle to render. */
	__u16 width;
	__u16 height;
	__u8 min_x_tile;
	__u8 min_y_tile;
	__u8 max_x_tile;
	__u8 max_y_tile;
	struct drm_vc4_submit_rcl_surface color_read;
	struct drm_vc4_submit_rcl_surface color_write;
	struct drm_vc4_submit_rcl_surface zs_read;
	struct drm_vc4_submit_rcl_surface zs_write;
	struct drm_vc4_submit_rcl_surface msaa_color_write;
	struct drm_vc4_submit_rcl_surface msaa_zs_write;
	__u32 clear_color[2];
	__u32 clear_z;
	__u8 clear_s;

	/* Structure padding; keeps the following flags field aligned. */
	__u32 pad:24;

#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
/* By default, the kernel gets to choose the order that the tiles are
 * rendered in. If this is set, then the tiles will be rendered in a
 * raster order, with the right-to-left vs left-to-right and
 * top-to-bottom vs bottom-to-top dictated by
 * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
 * blits to be implemented using the 3D engine.
 */
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
	__u32 flags;

	/* Returned value of the seqno of this render job (for the
	 * wait ioctl).
	 */
	__u64 seqno;
};

/**
 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
 *
 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
 * block, just return the status."
 */
struct drm_vc4_wait_seqno {
	__u64 seqno; /* Sequence number returned by DRM_VC4_SUBMIT_CL. */
	__u64 timeout_ns;
};

/**
 * struct drm_vc4_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_vc4_wait_bo {
	__u32 handle; /* GEM handle of the BO to wait on. */
	__u32 pad; /* Structure padding. */
	__u64 timeout_ns; /* Timeout in nanoseconds; 0 = poll status only. */
};

/**
 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_create_bo {
	__u32 size; /* Requested size of the BO in bytes. */
	__u32 flags;
	/** Returned GEM handle for the BO. */
	__u32 handle;
	__u32 pad; /* Structure padding. */
};

/**
 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
 *
 * This doesn't actually perform an mmap. Instead, it returns the
 * offset you need to use in an mmap on the DRM device node. This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_mmap_bo {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 flags;
	/** offset into the drm node to use for subsequent mmap call. */
	__u64 offset;
};

/**
 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
 * shader BOs.
 *
 * Since allowing a shader to be overwritten while it's also being
 * executed from would allow privilege escalation, shaders must be
 * created using this ioctl, and they can't be mmapped later.
 */
struct drm_vc4_create_shader_bo {
	/* Size of the data argument. */
	__u32 size;
	/* Flags, currently must be 0. */
	__u32 flags;

	/* Pointer to the data. */
	__u64 data;

	/** Returned GEM handle for the BO. */
	__u32 handle;
	/* Pad, must be 0. */
	__u32 pad;
};

/**
 * struct drm_vc4_get_hang_state_bo - one BO entry in the array returned
 * through drm_vc4_get_hang_state.bo.
 */
struct drm_vc4_get_hang_state_bo {
	__u32 handle; /* GEM handle of the captured BO. */
	__u32 paddr; /* Physical/bus address of the BO at hang time. */
	__u32 size; /* Size of the BO in bytes. */
	__u32 pad; /* Structure padding. */
};

/**
 * struct drm_vc4_get_hang_state - ioctl argument for collecting state
 * from a GPU hang for analysis.
 */
struct drm_vc4_get_hang_state {
	/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
	__u64 bo;
	/**
	 * On input, the size of the bo array. Output is the number
	 * of bos to be returned.
	 */
	__u32 bo_count;

	/* Offsets of the bin/render command lists at hang time. */
	__u32 start_bin, start_render;

	/* V3D hardware register snapshots captured at hang time.
	 * Names follow the V3D register mnemonics (control-list counters,
	 * status, binner pool addresses, debug/error registers).
	 */
	__u32 ct0ca, ct0ea;
	__u32 ct1ca, ct1ea;
	__u32 ct0cs, ct1cs;
	__u32 ct0ra0, ct1ra0;

	__u32 bpca, bpcs;
	__u32 bpoa, bpos;

	__u32 vpmbase;

	__u32 dbge;
	__u32 fdbgo;
	__u32 fdbgb;
	__u32 fdbgr;
	__u32 fdbgs;
	__u32 errstat;

	/* Pad that we may save more registers into in the future. */
	__u32 pad[16];
};

/* Valid values for drm_vc4_get_param.param. */
#define DRM_VC4_PARAM_V3D_IDENT0 0
#define DRM_VC4_PARAM_V3D_IDENT1 1
#define DRM_VC4_PARAM_V3D_IDENT2 2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6

/**
 * struct drm_vc4_get_param - ioctl argument for querying device
 * capabilities/identification.
 */
struct drm_vc4_get_param {
	__u32 param; /* One of DRM_VC4_PARAM_*. */
	__u32 pad; /* Structure padding. */
	__u64 value; /* Returned value for the requested param. */
};

/**
 * struct drm_vc4_get_tiling - ioctl argument for querying a BO's tiling
 * modifier.
 */
struct drm_vc4_get_tiling {
	__u32 handle; /* GEM handle of the BO to query. */
	__u32 flags; /* NOTE(review): presumably must be 0 — confirm against kernel driver. */
	__u64 modifier; /* Returned tiling modifier (assumed DRM_FORMAT_MOD_* — TODO confirm). */
};

/**
 * struct drm_vc4_set_tiling - ioctl argument for setting a BO's tiling
 * modifier.
 */
struct drm_vc4_set_tiling {
	__u32 handle; /* GEM handle of the BO to modify. */
	__u32 flags; /* NOTE(review): presumably must be 0 — confirm against kernel driver. */
	__u64 modifier; /* Tiling modifier to apply (assumed DRM_FORMAT_MOD_* — TODO confirm). */
};

/**
 * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
 */
struct drm_vc4_label_bo {
	__u32 handle; /* GEM handle of the BO to label. */
	__u32 len; /* Length in bytes of the name string. */
	__u64 name; /* User pointer to the name string. */
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_VC4_DRM_H_ */