/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 *
 */

#include "si_pipe.h"
#include "si_state.h"
#include "radeon/r600_cs.h"

#include "util/u_memory.h"

static void si_set_streamout_enable(struct si_context *sctx, bool enable);

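/* Type-safe wrapper around pipe_so_target_reference() for
 * si_streamout_target pointers, so callers don't have to cast. */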
static inline void si_so_target_reference(struct si_streamout_target **dst,
					  struct pipe_stream_output_target *src)
{
	pipe_so_target_reference((struct pipe_stream_output_target**)dst, src);
}

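/* Create a streamout target. Besides the generic pipe_stream_output_target
 * state, this suballocates a 4-byte, zero-initialized slot from GPU memory
 * where the buffer-filled size is stored at streamout end (see
 * si_emit_streamout_end), so a later "append" bind can resume from it. */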
static struct pipe_stream_output_target *
si_create_so_target(struct pipe_context *ctx,
		    struct pipe_resource *buffer,
		    unsigned buffer_offset,
		    unsigned buffer_size)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_streamout_target *t;
	struct r600_resource *rbuffer = (struct r600_resource*)buffer;

	t = CALLOC_STRUCT(si_streamout_target);
	if (!t) {
		return NULL;
	}

	u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 4, 4,
			     &t->buf_filled_size_offset,
			     (struct pipe_resource**)&t->buf_filled_size);
	if (!t->buf_filled_size) {
		FREE(t);
		return NULL;
	}

	t->b.reference.count = 1;
	t->b.context = ctx;
	pipe_resource_reference(&t->b.buffer, buffer);
	t->b.buffer_offset = buffer_offset;
	t->b.buffer_size = buffer_size;

	util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
		       buffer_offset + buffer_size);
	return &t->b;
}

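/* Drop the buffer reference and the suballocated filled-size slot. */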
static void si_so_target_destroy(struct pipe_context *ctx,
				 struct pipe_stream_output_target *target)
{
	struct si_streamout_target *t = (struct si_streamout_target*)target;
	pipe_resource_reference(&t->b.buffer, NULL);
	r600_resource_reference(&t->buf_filled_size, NULL);
	FREE(t);
}

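/* Request re-emission of the streamout begin state on the next draw.
 * A no-op if no targets are currently bound. */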
void si_streamout_buffers_dirty(struct si_context *sctx)
{
	if (!sctx->streamout.enabled_mask)
		return;

	si_mark_atom_dirty(sctx, &sctx->streamout.begin_atom);
	si_set_streamout_enable(sctx, true);
}

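/* Bind a new set of streamout targets. Running streamout is stopped first
 * so that the current filled sizes are saved. offsets[i] == (unsigned)-1
 * requests appending to buffer i, i.e. resuming from its saved filled size
 * instead of starting at the target's buffer offset. */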
void si_common_set_streamout_targets(struct pipe_context *ctx,
				     unsigned num_targets,
				     struct pipe_stream_output_target **targets,
				     const unsigned *offsets)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned i;
	unsigned enabled_mask = 0, append_bitmask = 0;

	/* Stop streamout. */
	if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
		si_emit_streamout_end(sctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		si_so_target_reference(&sctx->streamout.targets[i], targets[i]);
		if (!targets[i])
			continue;

		r600_context_add_resource_size(ctx, targets[i]->buffer);
		enabled_mask |= 1 << i;
		if (offsets[i] == ((unsigned)-1))
			append_bitmask |= 1 << i;
	}
	for (; i < sctx->streamout.num_targets; i++) {
		si_so_target_reference(&sctx->streamout.targets[i], NULL);
	}

	sctx->streamout.enabled_mask = enabled_mask;

	sctx->streamout.num_targets = num_targets;
	sctx->streamout.append_bitmask = append_bitmask;

	if (num_targets) {
		si_streamout_buffers_dirty(sctx);
	} else {
		si_set_atom_dirty(sctx, &sctx->streamout.begin_atom, false);
		si_set_streamout_enable(sctx, false);
	}
}

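/* Flush the VGT streamout unit: write CP_STRMOUT_CNTL, fire the
 * SO_VGTSTREAMOUT_FLUSH event, and use WAIT_REG_MEM to poll until the CP
 * sets OFFSET_UPDATE_DONE, which signals that the streamout offset update
 * has completed and the offsets can be safely read or reloaded. */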
static void si_flush_vgt_streamout(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned reg_strmout_cntl;

	/* The register is at different places on different ASICs. */
	if (sctx->b.chip_class >= CIK) {
		reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
		radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
	} else {
		reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
		radeon_set_config_reg(cs, reg_strmout_cntl, 0);
	}

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
	radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
	radeon_emit(cs, 0);
	radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
	radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}

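/* Start streamout. For each bound target, program BUFFER_SIZE and
 * VTX_STRIDE, then emit a STRMOUT_BUFFER_UPDATE packet that loads the write
 * offset either from the saved filled size in memory (append, when a valid
 * saved size exists) or from the packet itself (start at the target's
 * buffer offset). */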
static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
	struct si_context *sctx = (struct si_context*)rctx;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_streamout_target **t = sctx->streamout.targets;
	uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
	unsigned i;

	si_flush_vgt_streamout(sctx);

	for (i = 0; i < sctx->streamout.num_targets; i++) {
		if (!t[i])
			continue;

		t[i]->stride_in_dw = stride_in_dw[i];

		/* SI binds streamout buffers as shader resources.
		 * VGT only counts primitives and tells the shader
		 * through SGPRs what to do. */
		radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
		radeon_emit(cs, (t[i]->b.buffer_offset +
				 t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
		radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */

		if (sctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
			uint64_t va = t[i]->buf_filled_size->gpu_address +
				      t[i]->buf_filled_size_offset;

			/* Append. */
			radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
			radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
				    STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
			radeon_emit(cs, 0); /* unused */
			radeon_emit(cs, 0); /* unused */
			radeon_emit(cs, va); /* src address lo */
			radeon_emit(cs, va >> 32); /* src address hi */

			r600_emit_reloc(&sctx->b, &sctx->b.gfx, t[i]->buf_filled_size,
					RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
		} else {
			/* Start from the beginning. */
			radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
			radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
				    STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
			radeon_emit(cs, 0); /* unused */
			radeon_emit(cs, 0); /* unused */
			radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
			radeon_emit(cs, 0); /* unused */
		}
	}

	sctx->streamout.begin_emitted = true;
}

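/* Stop streamout: flush the VGT, then store each buffer's filled size back
 * to its suballocated memory slot so that a later "append" bind can resume
 * from it. */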
void si_emit_streamout_end(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_streamout_target **t = sctx->streamout.targets;
	unsigned i;
	uint64_t va;

	si_flush_vgt_streamout(sctx);

	for (i = 0; i < sctx->streamout.num_targets; i++) {
		if (!t[i])
			continue;

		va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
		radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
		radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
			    STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
			    STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
		radeon_emit(cs, va); /* dst address lo */
		radeon_emit(cs, va >> 32); /* dst address hi */
		radeon_emit(cs, 0); /* unused */
		radeon_emit(cs, 0); /* unused */

		r600_emit_reloc(&sctx->b, &sctx->b.gfx, t[i]->buf_filled_size,
				RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);

		/* Zero the buffer size. The counters (primitives generated,
		 * primitives emitted) may be enabled even if there is no
		 * buffer bound. This ensures that the primitives-emitted query
		 * won't increment. */
		radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);

		t[i]->buf_filled_size_valid = true;
	}

	sctx->streamout.begin_emitted = false;
	sctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH;
}

/* STREAMOUT CONFIG DERIVED STATE
 *
 * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
 * The buffer mask is an independent state, so no writes occur if there
 * are no buffers bound.
 */

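/* Emit VGT_STRMOUT_CONFIG (per-stream enable bits) and
 * VGT_STRMOUT_BUFFER_CONFIG (per-buffer enable bits, masked by the buffers
 * the current shaders actually write, i.e. enabled_stream_buffers_mask). */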
static void si_emit_streamout_enable(struct r600_common_context *rctx,
				     struct r600_atom *atom)
{
	struct si_context *sctx = (struct si_context*)rctx;

	radeon_set_context_reg_seq(sctx->b.gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
	radeon_emit(sctx->b.gfx.cs,
		    S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
		    S_028B94_RAST_STREAM(0) |
		    S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
		    S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
		    S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
	radeon_emit(sctx->b.gfx.cs,
		    sctx->streamout.hw_enabled_mask &
		    sctx->streamout.enabled_stream_buffers_mask);
}

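/* Update the streamout enable state. The 4-bit buffer mask is replicated
 * into all four per-stream nibbles of the hardware enable mask (4 streams x
 * 4 buffers); the enable atom is re-emitted only if the effective enable or
 * the mask actually changed. */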
static void si_set_streamout_enable(struct si_context *sctx, bool enable)
{
	bool old_strmout_en = si_get_strmout_en(sctx);
	unsigned old_hw_enabled_mask = sctx->streamout.hw_enabled_mask;

	sctx->streamout.streamout_enabled = enable;

	sctx->streamout.hw_enabled_mask = sctx->streamout.enabled_mask |
					  (sctx->streamout.enabled_mask << 4) |
					  (sctx->streamout.enabled_mask << 8) |
					  (sctx->streamout.enabled_mask << 12);

	if ((old_strmout_en != si_get_strmout_en(sctx)) ||
	    (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
		si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
}

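/* Track active PRIMITIVES_GENERATED queries. Streamout must be enabled
 * while such a query is running (see the comment above), so the enable atom
 * is re-emitted when the query count transitions between zero and
 * non-zero. */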
void si_update_prims_generated_query_state(struct si_context *sctx,
					   unsigned type, int diff)
{
	if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
		bool old_strmout_en = si_get_strmout_en(sctx);

		sctx->streamout.num_prims_gen_queries += diff;
		assert(sctx->streamout.num_prims_gen_queries >= 0);

		sctx->streamout.prims_gen_query_enabled =
			sctx->streamout.num_prims_gen_queries != 0;

		if (old_strmout_en != si_get_strmout_en(sctx))
			si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
	}
}

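/* Hook up the pipe_context entry points and the atom emit callbacks. */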
void si_init_streamout_functions(struct si_context *sctx)
{
	sctx->b.b.create_stream_output_target = si_create_so_target;
	sctx->b.b.stream_output_target_destroy = si_so_target_destroy;
	sctx->streamout.begin_atom.emit = si_emit_streamout_begin;
	sctx->streamout.enable_atom.emit = si_emit_streamout_enable;
}