Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 1 | #ifndef INTEL_BATCHBUFFER_H |
| 2 | #define INTEL_BATCHBUFFER_H |
| 3 | |
Daniel Vetter | 7dc0001 | 2014-03-22 15:31:15 +0100 | [diff] [blame] | 4 | #include <stdint.h> |
Daniel Vetter | 924115b | 2014-03-22 20:18:51 +0100 | [diff] [blame] | 5 | #include <intel_bufmgr.h> |
Damien Lespiau | 672e88a | 2015-03-03 14:11:00 +0000 | [diff] [blame] | 6 | #include <i915_drm.h> |
| 7 | |
Daniel Vetter | 0e22f14 | 2014-03-22 19:27:04 +0100 | [diff] [blame] | 8 | #include "igt_core.h" |
Daniel Vetter | 6cfcd71 | 2014-03-22 20:07:35 +0100 | [diff] [blame] | 9 | #include "intel_reg.h" |
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 10 | |
| 11 | #define BATCH_SZ 4096 |
| 12 | #define BATCH_RESERVED 16 |
| 13 | |
/**
 * intel_batchbuffer:
 *
 * Software-built batch of GPU commands. Commands are accumulated into the
 * embedded @buffer and submitted through an execbuf when the batch is
 * flushed (see intel_batchbuffer_flush() and friends).
 */
struct intel_batchbuffer {
	drm_intel_bufmgr *bufmgr;	/* libdrm buffer manager the batch bo is allocated from */
	uint32_t devid;			/* PCI device id of the target GPU */
	int gen;			/* GPU generation; gates gen8+ paths in BEGIN_BATCH & co. */

	drm_intel_context *ctx;		/* optional hw context, set via intel_batchbuffer_set_context() */
	drm_intel_bo *bo;		/* gem buffer object the commands are uploaded into */

	uint8_t buffer[BATCH_SZ];	/* CPU-side staging area for command DWORDs */
	/* ptr: next free byte in @buffer; end: expected end of the packet
	 * opened by BEGIN_BATCH (NULL outside a BEGIN/ADVANCE pair). */
	uint8_t *ptr, *end;
	uint8_t *state;
};
| 26 | |
/* Allocates and initializes a batchbuffer for the device @devid, backed by
 * @bufmgr. Free with intel_batchbuffer_free(). */
struct intel_batchbuffer *intel_batchbuffer_alloc(drm_intel_bufmgr *bufmgr,
						  uint32_t devid);

/* Sets the (optional) hardware context used when flushing @batch. */
void intel_batchbuffer_set_context(struct intel_batchbuffer *batch,
				   drm_intel_context *ctx);


/* Releases @batch and its underlying buffer object. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch);


/* Submits the accumulated commands to the kernel and resets the batch.
 * The _on_ring variant targets a specific engine, the _with_context
 * variant submits with the given hardware @context. */
void intel_batchbuffer_flush(struct intel_batchbuffer *batch);
void intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring);
void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
					  drm_intel_context *context);

/* Discards any accumulated commands and prepares @batch for reuse. */
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);

/* Copies @bytes bytes of raw command @data into the batch. */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, unsigned int bytes);

/* Emits a relocation entry for @buffer at the current batch location with
 * the given gem @read_domains/@write_domain; @fenced selects a fenced
 * relocation. @delta is added to the buffer's GPU address. */
void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
				  drm_intel_bo *buffer,
				  uint64_t delta,
				  uint32_t read_domains,
				  uint32_t write_domain,
				  int fenced);
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 53 | |
| 54 | /* Inline functions - might actually be better off with these |
| 55 | * non-inlined. Certainly better off switching all command packets to |
| 56 | * be passed as structs rather than dwords, but that's a little bit of |
| 57 | * work... |
| 58 | */ |
Ben Widawsky | 802bd74 | 2012-01-15 13:41:42 -0800 | [diff] [blame] | 59 | #pragma GCC diagnostic ignored "-Winline" |
Oscar Mateo | 5032e7b | 2013-11-12 11:50:42 +0000 | [diff] [blame] | 60 | static inline unsigned int |
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 61 | intel_batchbuffer_space(struct intel_batchbuffer *batch) |
| 62 | { |
Chris Wilson | 371f87f | 2011-02-01 10:53:57 +0000 | [diff] [blame] | 63 | return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - batch->buffer); |
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 64 | } |
| 65 | |
| 66 | |
| 67 | static inline void |
| 68 | intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, uint32_t dword) |
| 69 | { |
Daniel Vetter | 0e22f14 | 2014-03-22 19:27:04 +0100 | [diff] [blame] | 70 | igt_assert(intel_batchbuffer_space(batch) >= 4); |
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 71 | *(uint32_t *) (batch->ptr) = dword; |
| 72 | batch->ptr += 4; |
| 73 | } |
| 74 | |
| 75 | static inline void |
| 76 | intel_batchbuffer_require_space(struct intel_batchbuffer *batch, |
| 77 | unsigned int sz) |
| 78 | { |
Daniel Vetter | 0e22f14 | 2014-03-22 19:27:04 +0100 | [diff] [blame] | 79 | igt_assert(sz < BATCH_SZ - BATCH_RESERVED); |
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 80 | if (intel_batchbuffer_space(batch) < sz) |
| 81 | intel_batchbuffer_flush(batch); |
| 82 | } |
| 83 | |
/**
 * BEGIN_BATCH:
 * @n: number of DWORDS to emit
 * @r: number of RELOCS to emit
 *
 * Prepares a batch to emit @n DWORDS, flushing it if there's not enough space
 * available.
 *
 * On gen8+ every relocation takes up one additional DWORD, which is why @r is
 * added to the reservation on those platforms.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch in
 * scope.
 */
#define BEGIN_BATCH(n, r) do { \
	int __n = (n); \
	igt_assert(batch->end == NULL); \
	if (batch->gen >= 8) __n += (r); \
	__n *= 4; \
	intel_batchbuffer_require_space(batch, __n); \
	batch->end = batch->ptr + __n; \
} while (0)
| 103 | |
/**
 * OUT_BATCH:
 * @d: DWORD to emit
 *
 * Emits @d into a batch. Normally used between a #BEGIN_BATCH and a matching
 * #ADVANCE_BATCH pair.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch in
 * scope.
 */
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(batch, d)
| 114 | |
/**
 * OUT_RELOC_FENCED:
 * @buf: relocation target libdrm buffer object
 * @read_domains: gem domain bits for the relocation
 * @write_domain: gem domain bit for the relocation
 * @delta: delta value to add to @buf's gpu address
 *
 * Emits a fenced relocation into a batch.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch in
 * scope.
 */
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
	/* catches callers passing a negative delta before it is \
	 * implicitly converted to the uint64_t parameter */ \
	igt_assert((delta) >= 0); \
	intel_batchbuffer_emit_reloc(batch, buf, delta, \
				     read_domains, write_domain, 1); \
} while (0)
| 132 | |
/**
 * OUT_RELOC:
 * @buf: relocation target libdrm buffer object
 * @read_domains: gem domain bits for the relocation
 * @write_domain: gem domain bit for the relocation
 * @delta: delta value to add to @buf's gpu address
 *
 * Emits a normal, unfenced relocation into a batch.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch in
 * scope.
 */
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
	/* catches callers passing a negative delta before it is \
	 * implicitly converted to the uint64_t parameter */ \
	igt_assert((delta) >= 0); \
	intel_batchbuffer_emit_reloc(batch, buf, delta, \
				     read_domains, write_domain, 0); \
} while (0)
| 150 | |
/**
 * ADVANCE_BATCH:
 *
 * Completes the batch command emission sequence started with #BEGIN_BATCH.
 *
 * The assert verifies that exactly the number of DWORDs reserved by
 * #BEGIN_BATCH has been emitted.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch in
 * scope.
 */
#define ADVANCE_BATCH() do { \
	igt_assert(batch->ptr == batch->end); \
	batch->end = NULL; \
} while(0)
| 163 | |
/**
 * BLIT_COPY_BATCH_START:
 * @flags: additional command bits to OR into the blt header
 *
 * Opens a batch packet (8 DWORDs, 2 relocations) and emits the
 * XY_SRC_COPY_BLT command header. The trailing term is the command length
 * field: 6, plus 2 on gen8+ where each of the two relocations occupies an
 * extra DWORD.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch
 * in scope.
 */
#define BLIT_COPY_BATCH_START(flags) do { \
	BEGIN_BATCH(8, 2); \
	OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
		  XY_SRC_COPY_BLT_WRITE_ALPHA | \
		  XY_SRC_COPY_BLT_WRITE_RGB | \
		  (flags) | \
		  (6 + 2*(batch->gen >= 8))); \
} while(0)
| 172 | |
/**
 * COLOR_BLIT_COPY_BATCH_START:
 * @flags: additional command bits to OR into the blt header
 *
 * Opens a batch packet (6 DWORDs, 1 relocation) and emits the XY_COLOR_BLT
 * command header. The trailing term is the command length field: 4, plus 1
 * on gen8+ where the relocation occupies an extra DWORD.
 *
 * This macro needs a pointer to an #intel_batchbuffer structure called batch
 * in scope.
 */
#define COLOR_BLIT_COPY_BATCH_START(flags) do { \
	BEGIN_BATCH(6, 1); \
	OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | \
		  COLOR_BLT_WRITE_ALPHA | \
		  XY_COLOR_BLT_WRITE_RGB | \
		  (flags) | \
		  (4 + (batch->gen >= 8))); \
} while(0)
| 181 | |
/* Copies a @width x @height rectangle (at @bpp bits per pixel) from
 * @src_bo to @dst_bo using the blitter engine. */
void
intel_blt_copy(struct intel_batchbuffer *batch,
	       drm_intel_bo *src_bo, int src_x1, int src_y1, int src_pitch,
	       drm_intel_bo *dst_bo, int dst_x1, int dst_y1, int dst_pitch,
	       int width, int height, int bpp);
/* Convenience wrapper: copies @size bytes from @src_bo to @dst_bo. */
void intel_copy_bo(struct intel_batchbuffer *batch,
		   drm_intel_bo *dst_bo, drm_intel_bo *src_bo,
		   long int size);
Chris Wilson | 9537422 | 2010-04-08 11:56:57 +0100 | [diff] [blame] | 190 | |
Thomas Wood | 3b8e121 | 2015-03-12 17:01:57 +0000 | [diff] [blame] | 191 | /* |
Damien Lespiau | cbd927c | 2015-03-03 14:10:57 +0000 | [diff] [blame] | 192 | * Yf/Ys tiling |
| 193 | * |
| 194 | * Tiling mode in the I915_TILING_... namespace for new tiling modes which are |
| 195 | * defined in the kernel. (They are not fenceable so the kernel does not need |
| 196 | * to know about them.) |
| 197 | * |
 * They are to be used by the blitting routines below.
| 199 | */ |
| 200 | #define I915_TILING_Yf 3 |
| 201 | #define I915_TILING_Ys 4 |
| 202 | |
/**
 * igt_buf:
 * @bo: underlying libdrm buffer object
 * @stride: stride of the buffer
 * @tiling: tiling mode bits
 * @data: pointer to the memory mapping of the buffer
 * @size: size of the buffer object
 *
 * This is an i-g-t buffer object wrapper structure which augments the baseline
 * libdrm buffer object with suitable data needed by the render copy and the
 * fill functions.
 */
struct igt_buf {
	drm_intel_bo *bo;
	uint32_t stride;
	uint32_t tiling;
	uint32_t *data;
	uint32_t size;
	/*< private >*/
	/* NOTE(review): internal bookkeeping; semantics not visible in this
	 * header — see the implementation before relying on it. */
	unsigned num_tiles;
};
| 224 | |
/* Derived dimensions of an #igt_buf (presumably in pixels, computed from
 * @stride/@size/@tiling — confirm against the implementation). */
unsigned igt_buf_width(struct igt_buf *buf);
unsigned igt_buf_height(struct igt_buf *buf);
Daniel Vetter | 7dc0001 | 2014-03-22 15:31:15 +0100 | [diff] [blame] | 227 | |
/* Copies a @width x @height rectangle from @src to @dst with the blitter's
 * fast-copy command; supports the extra Yf/Ys tiling modes defined above. */
void igt_blitter_fast_copy(struct intel_batchbuffer *batch,
			   struct igt_buf *src, unsigned src_x, unsigned src_y,
			   unsigned width, unsigned height,
			   struct igt_buf *dst, unsigned dst_x, unsigned dst_y);

/* Raw variant of igt_blitter_fast_copy() working on bare gem handles and
 * explicit stride/tiling parameters instead of #igt_buf wrappers. */
void igt_blitter_fast_copy__raw(int fd,
				/* src */
				uint32_t src_handle,
				unsigned int src_stride,
				unsigned int src_tiling,
				unsigned int src_x, unsigned src_y,

				/* size */
				unsigned int width, unsigned int height,

				/* dst */
				uint32_t dst_handle,
				unsigned int dst_stride,
				unsigned int dst_tiling,
				unsigned int dst_x, unsigned dst_y);
| 248 | |
/**
 * igt_render_copyfunc_t:
 * @batch: batchbuffer object
 * @context: libdrm hardware context to use
 * @src: source i-g-t buffer object
 * @src_x: source pixel x-coordinate
 * @src_y: source pixel y-coordinate
 * @width: width of the copied rectangle
 * @height: height of the copied rectangle
 * @dst: destination i-g-t buffer object
 * @dst_x: destination pixel x-coordinate
 * @dst_y: destination pixel y-coordinate
 *
 * This is the type of the per-platform render copy functions. The
 * platform-specific implementation can be obtained by calling
 * igt_get_render_copyfunc().
 *
 * A render copy function will emit a batchbuffer to the kernel which executes
 * the specified blit copy operation using the render engine. @context is
 * optional and can be NULL.
 */
typedef void (*igt_render_copyfunc_t)(struct intel_batchbuffer *batch,
				      drm_intel_context *context,
				      struct igt_buf *src, unsigned src_x, unsigned src_y,
				      unsigned width, unsigned height,
				      struct igt_buf *dst, unsigned dst_x, unsigned dst_y);

/* Returns the render copy implementation for @devid, or NULL presumably when
 * the platform is unsupported — confirm against the implementation. */
igt_render_copyfunc_t igt_get_render_copyfunc(int devid);
Daniel Vetter | 7dc0001 | 2014-03-22 15:31:15 +0100 | [diff] [blame] | 277 | |
/**
 * igt_fillfunc_t:
 * @batch: batchbuffer object
 * @dst: destination i-g-t buffer object
 * @x: destination pixel x-coordinate
 * @y: destination pixel y-coordinate
 * @width: width of the filled rectangle
 * @height: height of the filled rectangle
 * @color: fill color to use
 *
 * This is the type of the per-platform fill functions using media
 * or gpgpu pipeline. The platform-specific implementation can be obtained
 * by calling igt_get_media_fillfunc() or igt_get_gpgpu_fillfunc().
 *
 * A fill function will emit a batchbuffer to the kernel which executes
 * the specified blit fill operation using the media/gpgpu engine.
 */
typedef void (*igt_fillfunc_t)(struct intel_batchbuffer *batch,
			       struct igt_buf *dst,
			       unsigned x, unsigned y,
			       unsigned width, unsigned height,
			       uint8_t color);

/* Per-pipeline lookup of the fill implementation for @devid. */
igt_fillfunc_t igt_get_media_fillfunc(int devid);
igt_fillfunc_t igt_get_gpgpu_fillfunc(int devid);
Daniel Vetter | aaebbc5 | 2014-03-22 15:35:16 +0100 | [diff] [blame] | 303 | |
/**
 * igt_media_spinfunc_t:
 * @batch: batchbuffer object
 * @dst: destination i-g-t buffer object
 * @spins: number of loops to execute
 *
 * This is the type of the per-platform media spin functions. The
 * platform-specific implementation can be obtained by calling
 * igt_get_media_spinfunc().
 *
 * The media spin function emits a batchbuffer for the render engine with
 * the media pipeline selected. The workload consists of a single thread
 * which spins in a tight loop the requested number of times. Each spin
 * increments a counter whose final 32-bit value is written to the
 * destination buffer on completion. This utility provides a simple way
 * to keep the render engine busy for a set time for various tests.
 */
typedef void (*igt_media_spinfunc_t)(struct intel_batchbuffer *batch,
				     struct igt_buf *dst, uint32_t spins);

/* Returns the media spin implementation for @devid. */
igt_media_spinfunc_t igt_get_media_spinfunc(int devid);
| 325 | |
Eric Anholt | 8c64183 | 2009-03-26 17:15:11 -0700 | [diff] [blame] | 326 | #endif |