blob: 269245243723f72bfd8beeeab562bd570279e5fc [file] [log] [blame]
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001/**********************************************************
Brian Paule0542512015-08-13 11:00:58 -07002 * Copyright 2008-2015 VMware, Inc. All rights reserved.
Jakob Bornecrantz31926332009-11-16 19:56:18 +01003 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26#include "pipe/p_state.h"
27#include "pipe/p_context.h"
Brian Paule0542512015-08-13 11:00:58 -070028
29#include "util/u_bitmask.h"
Jakob Bornecrantz31926332009-11-16 19:56:18 +010030#include "util/u_memory.h"
31
32#include "svga_cmd.h"
33#include "svga_context.h"
34#include "svga_screen.h"
Keith Whitwell287c94e2010-04-10 16:05:54 +010035#include "svga_resource_buffer.h"
Jakob Bornecrantz31926332009-11-16 19:56:18 +010036#include "svga_winsys.h"
Jakob Bornecrantz31926332009-11-16 19:56:18 +010037#include "svga_debug.h"
38
39
/* Fixme: want a public base class for all pipe structs, even if there
 * isn't much in them.
 */
struct pipe_query {
   int dummy;   /* placeholder member; gives the opaque base type non-zero size */
};
46
/** SVGA representation of a pipe query object. */
struct svga_query {
   struct pipe_query base;          /**< opaque handle returned to the state tracker */
   unsigned type;                   /**< PIPE_QUERY_x or SVGA_QUERY_x */
   SVGA3dQueryType svga_type;       /**< SVGA3D_QUERYTYPE_x or unused */

   unsigned id;                     /** Per-context query identifier */

   struct pipe_fence_handle *fence; /**< fence of the flush that submitted the query */

   /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */

   /* For VGPU9 */
   struct svga_winsys_buffer *hwbuf;        /**< pinned buffer holding queryResult */
   volatile SVGA3dQueryResult *queryResult; /**< host-written result; buffer stays mapped */

   /** For VGPU10 */
   struct svga_winsys_gb_query *gb_query;   /**< context-shared gb query object */
   SVGA3dDXQueryFlags flags;
   unsigned offset;                 /**< offset to the gb_query memory */
   struct pipe_query *predicate;    /** The associated query that can be used for predicate */

   /** For non-GPU SVGA_QUERY_x queries */
   uint64_t begin_count, end_count; /* counter snapshots at begin/end time */
};
71
Brian Paulcecbfce2013-04-18 16:09:27 -060072
/** Cast wrapper: view a generic pipe_query handle as the svga subclass. */
static inline struct svga_query *
svga_query(struct pipe_query *q)
{
   return (struct svga_query *)q;
}
79
Brian Paul299f8ca2016-03-21 13:23:04 -060080/**
81 * VGPU9
82 */
Jakob Bornecrantz31926332009-11-16 19:56:18 +010083
Brian Paulcecbfce2013-04-18 16:09:27 -060084static boolean
85svga_get_query_result(struct pipe_context *pipe,
86 struct pipe_query *q,
87 boolean wait,
88 union pipe_query_result *result);
89
Brian Paule0542512015-08-13 11:00:58 -070090static enum pipe_error
91define_query_vgpu9(struct svga_context *svga,
92 struct svga_query *sq)
93{
94 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
95
96 sq->hwbuf = svga_winsys_buffer_create(svga, 1,
97 SVGA_BUFFER_USAGE_PINNED,
98 sizeof *sq->queryResult);
99 if (!sq->hwbuf)
100 return PIPE_ERROR_OUT_OF_MEMORY;
101
102 sq->queryResult = (SVGA3dQueryResult *)
103 sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
104 if (!sq->queryResult) {
105 sws->buffer_destroy(sws, sq->hwbuf);
106 return PIPE_ERROR_OUT_OF_MEMORY;
107 }
108
109 sq->queryResult->totalSize = sizeof *sq->queryResult;
110 sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
111
112 /* We request the buffer to be pinned and assume it is always mapped.
113 * The reason is that we don't want to wait for fences when checking the
114 * query status.
115 */
116 sws->buffer_unmap(sws, sq->hwbuf);
117
118 return PIPE_OK;
119}
120
/**
 * VGPU9: begin a query.
 *
 * If a previous use of this query is still pending on the host, wait for
 * that result first (the pinned result buffer cannot be handed back while
 * the host may still write it).  Then reset the result state to NEW, drop
 * the old fence, and emit the BeginQuery command (retrying once after a
 * flush if the command buffer is full).
 */
static enum pipe_error
begin_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result.
       * We cannot let go of the existing buffer and just get a new one
       * because its storage may be reused for other purposes and clobbered
       * by the host when it determines the query result. So the only
       * option here is to wait for the existing query's result -- not a
       * big deal, given that no sane application would do this.
       */
      /* NOTE(review): this passes a uint64_t* where the callee takes a
       * union pipe_query_result* — appears to rely on the u64 member being
       * first and the callee writing only 64 bits here; confirm.
       */
      uint64_t result;
      svga_get_query_result(&svga->pipe, &sq->base, TRUE, (void*)&result);
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   }
   return ret;
}
150
151static enum pipe_error
152end_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
153{
154 enum pipe_error ret = PIPE_OK;
155
156 /* Set to PENDING before sending EndQuery. */
157 sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;
158
159 ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
160 if (ret != PIPE_OK) {
161 svga_context_flush(svga, NULL);
162 ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
163 }
164 return ret;
165}
166
/**
 * VGPU9: fetch the result of a query.
 *
 * \param wait    if TRUE, block until the host has produced the result
 * \param result  receives the 32-bit host result widened to 64 bits
 * \return TRUE if the result is available, FALSE if still pending and
 *         \p wait was FALSE.
 */
static boolean
get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
                       boolean wait, uint64_t *result)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret;
   SVGA3dQueryState state;

   if (!sq->fence) {
      /* The query status won't be updated by the host unless
       * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
       * a synchronous wait on the host.
       */
      ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      if (ret != PIPE_OK) {
         /* Command buffer full: flush and retry once. */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      }
      assert (ret == PIPE_OK);
      /* Flush to get a fence we can wait on below. */
      svga_context_flush(svga, &sq->fence);
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Block until the host has processed the query. */
      sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
                        SVGA_FENCE_FLAG_QUERY);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;
   return TRUE;
}
205
206
207/**
208 * VGPU10
209 *
210 * There is one query mob allocated for each context to be shared by all
 * query types. The mob is used to hold queries' state and result. Since
212 * each query result type is of different length, to ease the query allocation
213 * management, the mob is divided into memory blocks. Each memory block
214 * will hold queries of the same type. Multiple memory blocks can be allocated
215 * for a particular query type.
216 *
217 * Currently each memory block is of 184 bytes. We support up to 128
218 * memory blocks. The query memory size is arbitrary right now.
 * Each occlusion query takes about 8 bytes. One memory block can accommodate
220 * 23 occlusion queries. 128 of those blocks can support up to 2944 occlusion
221 * queries. That seems reasonable for now. If we think this limit is
222 * not enough, we can increase the limit or try to grow the mob in runtime.
223 * Note, SVGA device does not impose one mob per context for queries,
224 * we could allocate multiple mobs for queries; however, wddm KMD does not
225 * currently support that.
226 *
227 * Also note that the GL guest driver does not issue any of the
228 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
229 */
230#define SVGA_QUERY_MEM_BLOCK_SIZE (sizeof(SVGADXQueryResultUnion) * 2)
231#define SVGA_QUERY_MEM_SIZE (128 * SVGA_QUERY_MEM_BLOCK_SIZE)
232
/**
 * Bookkeeping for one fixed-size memory block carved out of the
 * context's shared query mob.  Blocks holding the same query type are
 * chained through \c next off svga->gb_query_map[type].
 */
struct svga_qmem_alloc_entry
{
   unsigned start_offset;               /* start offset of the memory block */
   unsigned block_index;                /* block index of the memory block */
   unsigned query_size;                 /* query size in this memory block */
   unsigned nquery;                     /* number of queries allocated */
   struct util_bitmask *alloc_mask;     /* allocation mask */
   struct svga_qmem_alloc_entry *next;  /* next memory block */
};
242
243
244/**
245 * Allocate a memory block from the query object memory
246 * \return -1 if out of memory, else index of the query memory block
247 */
static int
allocate_query_block(struct svga_context *svga)
{
   int index;
   unsigned offset;

   /* Find the next available query block */
   index = util_bitmask_add(svga->gb_query_alloc_mask);

   if (index == UTIL_BITMASK_INVALID_INDEX)
      return -1;

   offset = index * SVGA_QUERY_MEM_BLOCK_SIZE;
   if (offset >= svga->gb_query_len) {
      unsigned i;

      /**
       * All the memory blocks are allocated, lets see if there is
       * any empty memory block around that can be freed up.
       */
      /* NOTE(review): the index just added to gb_query_alloc_mask above is
       * never cleared on this path, and the recycled alloc_entry is unlinked
       * but not FREEd (nor its alloc_mask destroyed) — both look like small
       * leaks; confirm against allocate_query_block_entry()'s expectations.
       */
      index = -1;
      for (i = 0; i < SVGA3D_QUERYTYPE_MAX && index == -1; i++) {
         struct svga_qmem_alloc_entry *alloc_entry;
         struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;

         alloc_entry = svga->gb_query_map[i];
         while (alloc_entry && index == -1) {
            if (alloc_entry->nquery == 0) {
               /* This memory block is empty, it can be recycled. */
               if (prev_alloc_entry) {
                  prev_alloc_entry->next = alloc_entry->next;
               } else {
                  svga->gb_query_map[i] = alloc_entry->next;
               }
               index = alloc_entry->block_index;
            } else {
               prev_alloc_entry = alloc_entry;
               alloc_entry = alloc_entry->next;
            }
         }
      }
   }

   return index;
}
293
294/**
295 * Allocate a slot in the specified memory block.
296 * All slots in this memory block are of the same size.
297 *
298 * \return -1 if out of memory, else index of the query slot
299 */
300static int
301allocate_query_slot(struct svga_context *svga,
302 struct svga_qmem_alloc_entry *alloc)
303{
304 int index;
305 unsigned offset;
306
307 /* Find the next available slot */
308 index = util_bitmask_add(alloc->alloc_mask);
309
310 if (index == UTIL_BITMASK_INVALID_INDEX)
311 return -1;
312
313 offset = index * alloc->query_size;
314 if (offset >= SVGA_QUERY_MEM_BLOCK_SIZE)
315 return -1;
316
317 alloc->nquery++;
318
319 return index;
320}
321
322/**
323 * Deallocate the specified slot in the memory block.
324 * If all slots are freed up, then deallocate the memory block
325 * as well, so it can be allocated for other query type
326 */
327static void
328deallocate_query_slot(struct svga_context *svga,
329 struct svga_qmem_alloc_entry *alloc,
330 unsigned index)
331{
332 assert(index != UTIL_BITMASK_INVALID_INDEX);
333
334 util_bitmask_clear(alloc->alloc_mask, index);
335 alloc->nquery--;
336
337 /**
338 * Don't worry about deallocating the empty memory block here.
339 * The empty memory block will be recycled when no more memory block
340 * can be allocated.
341 */
342}
343
344static struct svga_qmem_alloc_entry *
345allocate_query_block_entry(struct svga_context *svga,
346 unsigned len)
347{
348 struct svga_qmem_alloc_entry *alloc_entry;
349 int block_index = -1;
350
351 block_index = allocate_query_block(svga);
352 if (block_index == -1)
353 return NULL;
354 alloc_entry = CALLOC_STRUCT(svga_qmem_alloc_entry);
Edward O'Callaghan13eb5f52015-12-04 22:08:22 +1100355 if (!alloc_entry)
Brian Paule0542512015-08-13 11:00:58 -0700356 return NULL;
357
358 alloc_entry->block_index = block_index;
359 alloc_entry->start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE;
360 alloc_entry->nquery = 0;
361 alloc_entry->alloc_mask = util_bitmask_create();
362 alloc_entry->next = NULL;
363 alloc_entry->query_size = len;
364
365 return alloc_entry;
366}
367
368/**
369 * Allocate a memory slot for a query of the specified type.
370 * It will first search through the memory blocks that are allocated
371 * for the query type. If no memory slot is available, it will try
372 * to allocate another memory block within the query object memory for
373 * this query type.
374 */
/**
 * Allocate a memory slot for a query of the specified type.
 * It will first search through the memory blocks that are allocated
 * for the query type. If no memory slot is available, it will try
 * to allocate another memory block within the query object memory for
 * this query type.
 *
 * \param len  slot size in bytes for this query type
 * \return byte offset of the slot within the gb query memory, or -1 on
 *         failure.
 */
static int
allocate_query(struct svga_context *svga,
               SVGA3dQueryType type,
               unsigned len)
{
   struct svga_qmem_alloc_entry *alloc_entry;
   int slot_index = -1;
   unsigned offset;

   assert(type < SVGA3D_QUERYTYPE_MAX);

   alloc_entry = svga->gb_query_map[type];

   if (!alloc_entry) {
      /**
       * No query memory block has been allocated for this query type,
       * allocate one now
       */
      alloc_entry = allocate_query_block_entry(svga, len);
      if (!alloc_entry)
         return -1;
      svga->gb_query_map[type] = alloc_entry;
   }

   /* Allocate a slot within the memory block allocated for this query type */
   slot_index = allocate_query_slot(svga, alloc_entry);

   if (slot_index == -1) {
      /* This query memory block is full, allocate another one */
      alloc_entry = allocate_query_block_entry(svga, len);
      if (!alloc_entry)
         return -1;
      /* Prepend the new block to the per-type list. */
      alloc_entry->next = svga->gb_query_map[type];
      svga->gb_query_map[type] = alloc_entry;
      slot_index = allocate_query_slot(svga, alloc_entry);
   }

   /* NOTE(review): a fresh block should always have a free slot, but if this
    * assert ever fails in a release build, offset is computed from -1 below.
    */
   assert(slot_index != -1);
   offset = slot_index * len + alloc_entry->start_offset;

   return offset;
}
417
418
419/**
420 * Deallocate memory slot allocated for the specified query
421 */
422static void
423deallocate_query(struct svga_context *svga,
424 struct svga_query *sq)
425{
426 struct svga_qmem_alloc_entry *alloc_entry;
427 unsigned slot_index;
428 unsigned offset = sq->offset;
429
430 alloc_entry = svga->gb_query_map[sq->svga_type];
431
432 while (alloc_entry) {
433 if (offset >= alloc_entry->start_offset &&
434 offset < alloc_entry->start_offset + SVGA_QUERY_MEM_BLOCK_SIZE) {
435
436 /* The slot belongs to this memory block, deallocate it */
437 slot_index = (offset - alloc_entry->start_offset) /
438 alloc_entry->query_size;
439 deallocate_query_slot(svga, alloc_entry, slot_index);
440 alloc_entry = NULL;
441 } else {
442 alloc_entry = alloc_entry->next;
443 }
444 }
445}
446
447
448/**
449 * Destroy the gb query object and all the related query structures
450 */
451static void
452destroy_gb_query_obj(struct svga_context *svga)
453{
454 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
455 unsigned i;
456
Charmaine Lee50359dd2016-09-01 11:07:59 -0700457 for (i = 0; i < SVGA3D_QUERYTYPE_MAX; i++) {
Brian Paule0542512015-08-13 11:00:58 -0700458 struct svga_qmem_alloc_entry *alloc_entry, *next;
459 alloc_entry = svga->gb_query_map[i];
460 while (alloc_entry) {
461 next = alloc_entry->next;
462 util_bitmask_destroy(alloc_entry->alloc_mask);
463 FREE(alloc_entry);
464 alloc_entry = next;
465 }
466 svga->gb_query_map[i] = NULL;
467 }
468
469 if (svga->gb_query)
470 sws->query_destroy(sws, svga->gb_query);
471 svga->gb_query = NULL;
472
473 util_bitmask_destroy(svga->gb_query_alloc_mask);
474}
475
476/**
477 * Define query and create the gb query object if it is not already created.
478 * There is only one gb query object per context which will be shared by
479 * queries of all types.
480 */
static enum pipe_error
define_query_vgpu10(struct svga_context *svga,
                    struct svga_query *sq, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   int qlen;
   enum pipe_error ret = PIPE_OK;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   if (svga->gb_query == NULL) {
      /* Create a gb query object */
      svga->gb_query = sws->query_create(sws, SVGA_QUERY_MEM_SIZE);
      if (!svga->gb_query)
         return PIPE_ERROR_OUT_OF_MEMORY;
      svga->gb_query_len = SVGA_QUERY_MEM_SIZE;
      memset (svga->gb_query_map, 0, sizeof(svga->gb_query_map));
      svga->gb_query_alloc_mask = util_bitmask_create();

      /* Bind the query object to the context */
      if (svga->swc->query_bind(svga->swc, svga->gb_query,
                                SVGA_QUERY_FLAG_SET) != PIPE_OK) {
         /* Command buffer full: flush and retry the bind once. */
         svga_context_flush(svga, NULL);
         svga->swc->query_bind(svga->swc, svga->gb_query,
                               SVGA_QUERY_FLAG_SET);
      }
   }

   sq->gb_query = svga->gb_query;

   /* Allocate an integer ID for this query */
   /* NOTE(review): on the error returns below, this id (and the query slot)
    * are not released back to query_id_bm; looks like a small leak on OOM
    * paths — confirm against callers' cleanup.
    */
   sq->id = util_bitmask_add(svga->query_id_bm);
   if (sq->id == UTIL_BITMASK_INVALID_INDEX)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Find a slot for this query in the gb object */
   qlen = resultLen + sizeof(SVGA3dQueryState);
   /* NOTE(review): sq->offset is unsigned; the == -1 test relies on the
    * usual arithmetic conversion of -1 to UINT_MAX, which works but is
    * subtle.
    */
   sq->offset = allocate_query(svga, sq->svga_type, qlen);
   if (sq->offset == -1)
      return PIPE_ERROR_OUT_OF_MEMORY;

   SVGA_DBG(DEBUG_QUERY, " query type=%d qid=0x%x offset=%d\n",
            sq->svga_type, sq->id, sq->offset);

   /**
    * Send SVGA3D commands to define the query
    */
   ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   }
   if (ret != PIPE_OK)
      return PIPE_ERROR_OUT_OF_MEMORY;

   ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   }
   assert(ret == PIPE_OK);

   ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   }
   assert(ret == PIPE_OK);

   return PIPE_OK;
}
552
553static enum pipe_error
554destroy_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
555{
556 enum pipe_error ret;
557
558 ret = SVGA3D_vgpu10_DestroyQuery(svga->swc, sq->id);
559
560 /* Deallocate the memory slot allocated for this query */
561 deallocate_query(svga, sq);
562
563 return ret;
564}
565
566
567/**
 * Rebind queries to the context.
569 */
570static void
571rebind_vgpu10_query(struct svga_context *svga)
572{
573 if (svga->swc->query_bind(svga->swc, svga->gb_query,
574 SVGA_QUERY_FLAG_REF) != PIPE_OK) {
575 svga_context_flush(svga, NULL);
576 svga->swc->query_bind(svga->swc, svga->gb_query,
577 SVGA_QUERY_FLAG_REF);
578 }
579
580 svga->rebind.flags.query = FALSE;
581}
582
583
584static enum pipe_error
585begin_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
586{
587 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
588 enum pipe_error ret = PIPE_OK;
589 int status = 0;
590
591 sws->fence_reference(sws, &sq->fence, NULL);
592
593 /* Initialize the query state to NEW */
594 status = sws->query_init(sws, sq->gb_query, sq->offset, SVGA3D_QUERYSTATE_NEW);
595 if (status)
596 return PIPE_ERROR;
597
598 if (svga->rebind.flags.query) {
599 rebind_vgpu10_query(svga);
600 }
601
602 /* Send the BeginQuery command to the device */
603 ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
604 if (ret != PIPE_OK) {
605 svga_context_flush(svga, NULL);
606 ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
607 }
608 return ret;
609}
610
611static enum pipe_error
612end_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
613{
Brian Paule0542512015-08-13 11:00:58 -0700614 enum pipe_error ret = PIPE_OK;
615
616 if (svga->rebind.flags.query) {
617 rebind_vgpu10_query(svga);
618 }
619
620 ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
621 if (ret != PIPE_OK) {
622 svga_context_flush(svga, NULL);
623 ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
624 }
625
Brian Paule0542512015-08-13 11:00:58 -0700626 return ret;
627}
628
/**
 * VGPU10: fetch the result of a query from the gb query object.
 *
 * \param wait       if TRUE, block until the host has produced the result
 * \param result     receives resultLen bytes of query result data
 * \param resultLen  size of the result structure for this query type
 * \return TRUE if the result is available, FALSE if still pending and
 *         \p wait was FALSE.
 */
static boolean
get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
                        boolean wait, void *result, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   SVGA3dQueryState queryState;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);

   if (queryState != SVGA3D_QUERYSTATE_SUCCEEDED && !sq->fence) {
      /* We don't have the query result yet, and the query hasn't been
       * submitted. We need to submit it now since the GL spec says
       * "Querying the state for a given occlusion query forces that
       * occlusion query to complete within a finite amount of time."
       */
      svga_context_flush(svga, &sq->fence);
   }

   if (queryState == SVGA3D_QUERYSTATE_PENDING ||
       queryState == SVGA3D_QUERYSTATE_NEW) {
      if (!wait)
         return FALSE;
      /* Block until the host has processed the query, then re-read. */
      sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
                        SVGA_FENCE_FLAG_QUERY);
      sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
   }

   assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
          queryState == SVGA3D_QUERYSTATE_FAILED);

   return TRUE;
}
Brian Paulcecbfce2013-04-18 16:09:27 -0600665
/**
 * Create a query object (pipe_context::create_query hook).
 *
 * Dispatches on query_type: GPU-backed queries get VGPU9 or VGPU10
 * device resources; the SVGA_QUERY_x HUD counters need no device state
 * (their values are sampled in begin/end).  Returns NULL on allocation
 * failure.
 */
static struct pipe_query *
svga_create_query(struct pipe_context *pipe,
                  unsigned query_type,
                  unsigned index)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq;

   assert(query_type < SVGA_QUERY_MAX);

   sq = CALLOC_STRUCT(svga_query);
   if (!sq)
      goto fail;

   /* Allocate an integer ID for the query */
   sq->id = util_bitmask_add(svga->query_id_bm);
   if (sq->id == UTIL_BITMASK_INVALID_INDEX)
      goto fail;

   /* NOTE(review): "%x" is used to print the pointer sq here — works only
    * where pointers fit the int promotion; "%p" would be the portable form.
    */
   SVGA_DBG(DEBUG_QUERY, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__,
            query_type, sq, sq->id);

   /* NOTE(review): the define_query_vgpu9/vgpu10 return values are ignored
    * throughout this switch, so an OOM during definition still yields a
    * "successful" query object — confirm whether that is intentional.
    */
   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
      if (svga_have_vgpu10(svga)) {
         define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionQueryResult));

         /**
          * In OpenGL, occlusion counter query can be used in conditional
          * rendering; however, in DX10, only OCCLUSION_PREDICATE query can
          * be used for predication. Hence, we need to create an occlusion
          * predicate query along with the occlusion counter query. So when
          * the occlusion counter query is used for predication, the associated
          * query of occlusion predicate type will be used
          * in the SetPredication command.
          */
         sq->predicate = svga_create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, index);

      } else {
         define_query_vgpu9(svga, sq);
      }
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      if (svga_have_vgpu10(svga)) {
         sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
         define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
      } else {
         /* VGPU9 has no predicate query; fall back to a plain occlusion
          * query.
          */
         sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
         define_query_vgpu9(svga, sq);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
      assert(svga_have_vgpu10(svga));
      sq->svga_type = SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS;
      define_query_vgpu10(svga, sq,
                          sizeof(SVGADXStreamOutStatisticsQueryResult));
      break;
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      sq->svga_type = SVGA3D_QUERYTYPE_TIMESTAMP;
      define_query_vgpu10(svga, sq,
                          sizeof(SVGADXTimestampQueryResult));
      break;
   /* HUD counter queries: no device-side state to create. */
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
      break;
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_MAP_BUFFER_TIME:
      /* These queries need os_time_get() */
      svga->hud.uses_time = TRUE;
      break;
   default:
      assert(!"unexpected query type in svga_create_query()");
   }

   sq->type = query_type;

   return &sq->base;

fail:
   /* FREE(NULL) is a no-op, so this is safe for the CALLOC failure path. */
   FREE(sq);
   return NULL;
}
772
/**
 * Destroy a query object (pipe_context::destroy_query hook).
 *
 * A NULL query is a special internal convention here: it tears down the
 * context's shared gb query object instead of an individual query.
 */
static void
svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq;

   if (!q) {
      /* Context teardown path: release the shared gb query object. */
      destroy_gb_query_obj(svga);
      return;
   }

   sq = svga_query(q);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      if (svga_have_vgpu10(svga)) {
         /* make sure to also destroy any associated predicate query */
         if (sq->predicate)
            svga_destroy_query(pipe, sq->predicate);
         destroy_query_vgpu10(svga, sq);
      } else {
         sws->buffer_destroy(sws, sq->hwbuf);
      }
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      destroy_query_vgpu10(svga, sq);
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   /* HUD counter queries hold no device resources. */
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_MAP_BUFFER_TIME:
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
      /* nothing */
      break;
   default:
      assert(!"svga: unexpected query type in svga_destroy_query()");
   }

   /* Free the query id */
   util_bitmask_clear(svga->query_id_bm, sq->id);

   FREE(sq);
}
846
Brian Paulcecbfce2013-04-18 16:09:27 -0600847
Samuel Pitoiset96f164f2014-07-05 12:46:03 +0200848static boolean
Brian Paulcecbfce2013-04-18 16:09:27 -0600849svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100850{
Brian Paule0542512015-08-13 11:00:58 -0700851 struct svga_context *svga = svga_context(pipe);
852 struct svga_query *sq = svga_query(q);
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100853 enum pipe_error ret;
854
Brian Paule0542512015-08-13 11:00:58 -0700855 assert(sq);
856 assert(sq->type < SVGA_QUERY_MAX);
857
858 SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
859 sq, sq->id);
Brian Paulcecbfce2013-04-18 16:09:27 -0600860
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100861 /* Need to flush out buffered drawing commands so that they don't
862 * get counted in the query results.
863 */
864 svga_hwtnl_flush_retry(svga);
Brian Paulcecbfce2013-04-18 16:09:27 -0600865
Brian Paul49ed1f32013-04-01 17:49:31 -0600866 switch (sq->type) {
867 case PIPE_QUERY_OCCLUSION_COUNTER:
Charmaine Lee67693162016-04-19 18:12:17 -0700868 case PIPE_QUERY_OCCLUSION_PREDICATE:
Nicolai Hähnle3f6b3d92017-09-12 18:46:46 +0200869 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
Brian Paule0542512015-08-13 11:00:58 -0700870 if (svga_have_vgpu10(svga)) {
871 ret = begin_query_vgpu10(svga, sq);
872 /* also need to start the associated occlusion predicate query */
873 if (sq->predicate) {
874 enum pipe_error status;
875 status = begin_query_vgpu10(svga, svga_query(sq->predicate));
876 assert(status == PIPE_OK);
877 (void) status;
878 }
879 } else {
880 ret = begin_query_vgpu9(svga, sq);
Brian Paul49ed1f32013-04-01 17:49:31 -0600881 }
Brian Paule0542512015-08-13 11:00:58 -0700882 assert(ret == PIPE_OK);
883 (void) ret;
884 break;
Brian Paule0542512015-08-13 11:00:58 -0700885 case PIPE_QUERY_PRIMITIVES_GENERATED:
886 case PIPE_QUERY_PRIMITIVES_EMITTED:
887 case PIPE_QUERY_SO_STATISTICS:
888 case PIPE_QUERY_TIMESTAMP:
889 assert(svga_have_vgpu10(svga));
890 ret = begin_query_vgpu10(svga, sq);
891 assert(ret == PIPE_OK);
Brian Paul49ed1f32013-04-01 17:49:31 -0600892 break;
Neha Bhende9bc7e312015-10-09 16:10:16 -0600893 case SVGA_QUERY_NUM_DRAW_CALLS:
894 sq->begin_count = svga->hud.num_draw_calls;
Brian Paul3838eda2013-04-01 17:51:43 -0600895 break;
Neha Bhende9bc7e312015-10-09 16:10:16 -0600896 case SVGA_QUERY_NUM_FALLBACKS:
897 sq->begin_count = svga->hud.num_fallbacks;
898 break;
899 case SVGA_QUERY_NUM_FLUSHES:
900 sq->begin_count = svga->hud.num_flushes;
901 break;
902 case SVGA_QUERY_NUM_VALIDATIONS:
903 sq->begin_count = svga->hud.num_validations;
904 break;
905 case SVGA_QUERY_MAP_BUFFER_TIME:
906 sq->begin_count = svga->hud.map_buffer_time;
907 break;
Charmaine Leeee398142016-08-31 14:49:52 -0700908 case SVGA_QUERY_NUM_BUFFERS_MAPPED:
909 sq->begin_count = svga->hud.num_buffers_mapped;
910 break;
911 case SVGA_QUERY_NUM_TEXTURES_MAPPED:
912 sq->begin_count = svga->hud.num_textures_mapped;
Brian Paul3838eda2013-04-01 17:51:43 -0600913 break;
Brian Paul527466d2015-11-19 10:39:49 -0700914 case SVGA_QUERY_NUM_BYTES_UPLOADED:
915 sq->begin_count = svga->hud.num_bytes_uploaded;
916 break;
Brian Paul192ee9a2016-02-29 14:26:12 -0700917 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
918 sq->begin_count = svga->hud.command_buffer_size;
919 break;
Brian Paul7e8cf342016-03-04 09:14:34 -0700920 case SVGA_QUERY_FLUSH_TIME:
921 sq->begin_count = svga->hud.flush_time;
922 break;
Brian Paul3af78b42016-03-04 15:59:32 -0700923 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
924 sq->begin_count = svga->hud.surface_write_flushes;
925 break;
Charmaine Lee79e343b2016-03-10 10:57:24 -0800926 case SVGA_QUERY_NUM_READBACKS:
927 sq->begin_count = svga->hud.num_readbacks;
928 break;
Charmaine Lee0a1d91e2016-03-11 14:33:39 -0800929 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
930 sq->begin_count = svga->hud.num_resource_updates;
931 break;
932 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
933 sq->begin_count = svga->hud.num_buffer_uploads;
934 break;
935 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
936 sq->begin_count = svga->hud.num_const_buf_updates;
937 break;
938 case SVGA_QUERY_NUM_CONST_UPDATES:
939 sq->begin_count = svga->hud.num_const_updates;
940 break;
Brian Paulac114c62013-04-03 10:23:57 -0600941 case SVGA_QUERY_MEMORY_USED:
Neha Bhende9bc7e312015-10-09 16:10:16 -0600942 case SVGA_QUERY_NUM_SHADERS:
943 case SVGA_QUERY_NUM_RESOURCES:
944 case SVGA_QUERY_NUM_STATE_OBJECTS:
945 case SVGA_QUERY_NUM_SURFACE_VIEWS:
Charmaine Lee78e628a2015-12-21 11:07:08 -0800946 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
Brian Paule3f5b8a2017-06-16 16:36:43 -0600947 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
Brian Paulac114c62013-04-03 10:23:57 -0600948 /* nothing */
949 break;
Brian Paul49ed1f32013-04-01 17:49:31 -0600950 default:
951 assert(!"unexpected query type in svga_begin_query()");
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100952 }
Brian Paule0542512015-08-13 11:00:58 -0700953
954 svga->sq[sq->type] = sq;
955
Samuel Pitoiset96f164f2014-07-05 12:46:03 +0200956 return true;
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100957}
958
Brian Paulcecbfce2013-04-18 16:09:27 -0600959
Nicolai Hähnle32214e02016-04-20 09:22:48 -0500960static bool
Brian Paulcecbfce2013-04-18 16:09:27 -0600961svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100962{
Brian Paule0542512015-08-13 11:00:58 -0700963 struct svga_context *svga = svga_context(pipe);
964 struct svga_query *sq = svga_query(q);
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100965 enum pipe_error ret;
966
Brian Paule0542512015-08-13 11:00:58 -0700967 assert(sq);
968 assert(sq->type < SVGA_QUERY_MAX);
969
970 SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
971 sq, sq->id);
972
973 if (sq->type == PIPE_QUERY_TIMESTAMP && svga->sq[sq->type] != sq)
974 svga_begin_query(pipe, q);
Jakob Bornecrantz31926332009-11-16 19:56:18 +0100975
976 svga_hwtnl_flush_retry(svga);
Brian Paulcecbfce2013-04-18 16:09:27 -0600977
Brian Paule0542512015-08-13 11:00:58 -0700978 assert(svga->sq[sq->type] == sq);
979
Brian Paul49ed1f32013-04-01 17:49:31 -0600980 switch (sq->type) {
981 case PIPE_QUERY_OCCLUSION_COUNTER:
Charmaine Lee67693162016-04-19 18:12:17 -0700982 case PIPE_QUERY_OCCLUSION_PREDICATE:
Nicolai Hähnle3f6b3d92017-09-12 18:46:46 +0200983 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
Brian Paule0542512015-08-13 11:00:58 -0700984 if (svga_have_vgpu10(svga)) {
985 ret = end_query_vgpu10(svga, sq);
986 /* also need to end the associated occlusion predicate query */
987 if (sq->predicate) {
988 enum pipe_error status;
989 status = end_query_vgpu10(svga, svga_query(sq->predicate));
990 assert(status == PIPE_OK);
991 (void) status;
992 }
993 } else {
994 ret = end_query_vgpu9(svga, sq);
Brian Paul49ed1f32013-04-01 17:49:31 -0600995 }
Brian Paule0542512015-08-13 11:00:58 -0700996 assert(ret == PIPE_OK);
997 (void) ret;
Brian Paule0542512015-08-13 11:00:58 -0700998 break;
Brian Paule0542512015-08-13 11:00:58 -0700999 case PIPE_QUERY_PRIMITIVES_GENERATED:
1000 case PIPE_QUERY_PRIMITIVES_EMITTED:
1001 case PIPE_QUERY_SO_STATISTICS:
1002 case PIPE_QUERY_TIMESTAMP:
1003 assert(svga_have_vgpu10(svga));
1004 ret = end_query_vgpu10(svga, sq);
1005 assert(ret == PIPE_OK);
Brian Paul49ed1f32013-04-01 17:49:31 -06001006 break;
Neha Bhende9bc7e312015-10-09 16:10:16 -06001007 case SVGA_QUERY_NUM_DRAW_CALLS:
1008 sq->end_count = svga->hud.num_draw_calls;
Brian Paul3838eda2013-04-01 17:51:43 -06001009 break;
Neha Bhende9bc7e312015-10-09 16:10:16 -06001010 case SVGA_QUERY_NUM_FALLBACKS:
1011 sq->end_count = svga->hud.num_fallbacks;
1012 break;
1013 case SVGA_QUERY_NUM_FLUSHES:
1014 sq->end_count = svga->hud.num_flushes;
1015 break;
1016 case SVGA_QUERY_NUM_VALIDATIONS:
1017 sq->end_count = svga->hud.num_validations;
1018 break;
1019 case SVGA_QUERY_MAP_BUFFER_TIME:
1020 sq->end_count = svga->hud.map_buffer_time;
1021 break;
Charmaine Leeee398142016-08-31 14:49:52 -07001022 case SVGA_QUERY_NUM_BUFFERS_MAPPED:
1023 sq->end_count = svga->hud.num_buffers_mapped;
1024 break;
1025 case SVGA_QUERY_NUM_TEXTURES_MAPPED:
1026 sq->end_count = svga->hud.num_textures_mapped;
Brian Paul3838eda2013-04-01 17:51:43 -06001027 break;
Brian Paul527466d2015-11-19 10:39:49 -07001028 case SVGA_QUERY_NUM_BYTES_UPLOADED:
1029 sq->end_count = svga->hud.num_bytes_uploaded;
1030 break;
Brian Paul192ee9a2016-02-29 14:26:12 -07001031 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
1032 sq->end_count = svga->hud.command_buffer_size;
1033 break;
Brian Paul7e8cf342016-03-04 09:14:34 -07001034 case SVGA_QUERY_FLUSH_TIME:
1035 sq->end_count = svga->hud.flush_time;
1036 break;
Brian Paul3af78b42016-03-04 15:59:32 -07001037 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
1038 sq->end_count = svga->hud.surface_write_flushes;
1039 break;
Charmaine Lee79e343b2016-03-10 10:57:24 -08001040 case SVGA_QUERY_NUM_READBACKS:
1041 sq->end_count = svga->hud.num_readbacks;
1042 break;
Charmaine Lee0a1d91e2016-03-11 14:33:39 -08001043 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
1044 sq->end_count = svga->hud.num_resource_updates;
1045 break;
1046 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
1047 sq->end_count = svga->hud.num_buffer_uploads;
1048 break;
1049 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
1050 sq->end_count = svga->hud.num_const_buf_updates;
1051 break;
1052 case SVGA_QUERY_NUM_CONST_UPDATES:
1053 sq->end_count = svga->hud.num_const_updates;
1054 break;
Brian Paulac114c62013-04-03 10:23:57 -06001055 case SVGA_QUERY_MEMORY_USED:
Neha Bhende9bc7e312015-10-09 16:10:16 -06001056 case SVGA_QUERY_NUM_SHADERS:
1057 case SVGA_QUERY_NUM_RESOURCES:
1058 case SVGA_QUERY_NUM_STATE_OBJECTS:
1059 case SVGA_QUERY_NUM_SURFACE_VIEWS:
Charmaine Lee78e628a2015-12-21 11:07:08 -08001060 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
Brian Paule3f5b8a2017-06-16 16:36:43 -06001061 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
Brian Paulac114c62013-04-03 10:23:57 -06001062 /* nothing */
1063 break;
Brian Paul49ed1f32013-04-01 17:49:31 -06001064 default:
1065 assert(!"unexpected query type in svga_end_query()");
1066 }
Brian Paule0542512015-08-13 11:00:58 -07001067 svga->sq[sq->type] = NULL;
Nicolai Hähnle32214e02016-04-20 09:22:48 -05001068 return true;
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001069}
1070
Brian Paulcecbfce2013-04-18 16:09:27 -06001071
1072static boolean
1073svga_get_query_result(struct pipe_context *pipe,
1074 struct pipe_query *q,
1075 boolean wait,
1076 union pipe_query_result *vresult)
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001077{
Brian Paule0542512015-08-13 11:00:58 -07001078 struct svga_screen *svgascreen = svga_screen(pipe->screen);
1079 struct svga_context *svga = svga_context(pipe);
1080 struct svga_query *sq = svga_query(q);
1081 uint64_t *result = (uint64_t *)vresult;
1082 boolean ret = TRUE;
Brian Paulcecbfce2013-04-18 16:09:27 -06001083
Brian Paule0542512015-08-13 11:00:58 -07001084 assert(sq);
1085
1086 SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d wait: %d\n",
1087 __FUNCTION__, sq, sq->id, wait);
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001088
Brian Paul49ed1f32013-04-01 17:49:31 -06001089 switch (sq->type) {
1090 case PIPE_QUERY_OCCLUSION_COUNTER:
Brian Paule0542512015-08-13 11:00:58 -07001091 if (svga_have_vgpu10(svga)) {
1092 SVGADXOcclusionQueryResult occResult;
1093 ret = get_query_result_vgpu10(svga, sq, wait,
1094 (void *)&occResult, sizeof(occResult));
1095 *result = (uint64_t)occResult.samplesRendered;
1096 } else {
Brian Paul2318d202016-05-26 18:58:16 -06001097 ret = get_query_result_vgpu9(svga, sq, wait, result);
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001098 }
Brian Paul49ed1f32013-04-01 17:49:31 -06001099 break;
Nicolai Hähnle3f6b3d92017-09-12 18:46:46 +02001100 case PIPE_QUERY_OCCLUSION_PREDICATE:
1101 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
Charmaine Lee67693162016-04-19 18:12:17 -07001102 if (svga_have_vgpu10(svga)) {
1103 SVGADXOcclusionPredicateQueryResult occResult;
1104 ret = get_query_result_vgpu10(svga, sq, wait,
1105 (void *)&occResult, sizeof(occResult));
1106 vresult->b = occResult.anySamplesRendered != 0;
1107 } else {
Brian Paul55417142016-08-17 08:20:33 -06001108 uint64_t count = 0;
Brian Paul2318d202016-05-26 18:58:16 -06001109 ret = get_query_result_vgpu9(svga, sq, wait, &count);
Charmaine Lee67693162016-04-19 18:12:17 -07001110 vresult->b = count != 0;
1111 }
Brian Paule0542512015-08-13 11:00:58 -07001112 break;
1113 }
1114 case PIPE_QUERY_SO_STATISTICS: {
1115 SVGADXStreamOutStatisticsQueryResult sResult;
1116 struct pipe_query_data_so_statistics *pResult =
1117 (struct pipe_query_data_so_statistics *)vresult;
1118
1119 assert(svga_have_vgpu10(svga));
1120 ret = get_query_result_vgpu10(svga, sq, wait,
1121 (void *)&sResult, sizeof(sResult));
1122 pResult->num_primitives_written = sResult.numPrimitivesWritten;
1123 pResult->primitives_storage_needed = sResult.numPrimitivesRequired;
1124 break;
1125 }
1126 case PIPE_QUERY_TIMESTAMP: {
1127 SVGADXTimestampQueryResult sResult;
1128
1129 assert(svga_have_vgpu10(svga));
1130 ret = get_query_result_vgpu10(svga, sq, wait,
1131 (void *)&sResult, sizeof(sResult));
1132 *result = (uint64_t)sResult.timestamp;
1133 break;
1134 }
1135 case PIPE_QUERY_PRIMITIVES_GENERATED: {
1136 SVGADXStreamOutStatisticsQueryResult sResult;
1137
1138 assert(svga_have_vgpu10(svga));
1139 ret = get_query_result_vgpu10(svga, sq, wait,
1140 (void *)&sResult, sizeof sResult);
1141 *result = (uint64_t)sResult.numPrimitivesRequired;
1142 break;
1143 }
1144 case PIPE_QUERY_PRIMITIVES_EMITTED: {
1145 SVGADXStreamOutStatisticsQueryResult sResult;
1146
1147 assert(svga_have_vgpu10(svga));
1148 ret = get_query_result_vgpu10(svga, sq, wait,
1149 (void *)&sResult, sizeof sResult);
1150 *result = (uint64_t)sResult.numPrimitivesWritten;
1151 break;
1152 }
Neha Bhende9bc7e312015-10-09 16:10:16 -06001153 /* These are per-frame counters */
1154 case SVGA_QUERY_NUM_DRAW_CALLS:
1155 case SVGA_QUERY_NUM_FALLBACKS:
1156 case SVGA_QUERY_NUM_FLUSHES:
1157 case SVGA_QUERY_NUM_VALIDATIONS:
Brian Paul6fc8d902016-02-29 12:01:10 -07001158 case SVGA_QUERY_MAP_BUFFER_TIME:
Charmaine Leeee398142016-08-31 14:49:52 -07001159 case SVGA_QUERY_NUM_BUFFERS_MAPPED:
1160 case SVGA_QUERY_NUM_TEXTURES_MAPPED:
Brian Paul527466d2015-11-19 10:39:49 -07001161 case SVGA_QUERY_NUM_BYTES_UPLOADED:
Brian Paul192ee9a2016-02-29 14:26:12 -07001162 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
Brian Paul7e8cf342016-03-04 09:14:34 -07001163 case SVGA_QUERY_FLUSH_TIME:
Brian Paul3af78b42016-03-04 15:59:32 -07001164 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
Charmaine Lee79e343b2016-03-10 10:57:24 -08001165 case SVGA_QUERY_NUM_READBACKS:
Charmaine Lee0a1d91e2016-03-11 14:33:39 -08001166 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
1167 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
1168 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
1169 case SVGA_QUERY_NUM_CONST_UPDATES:
Brian Paul3838eda2013-04-01 17:51:43 -06001170 vresult->u64 = sq->end_count - sq->begin_count;
1171 break;
Neha Bhende9bc7e312015-10-09 16:10:16 -06001172 /* These are running total counters */
Brian Paulac114c62013-04-03 10:23:57 -06001173 case SVGA_QUERY_MEMORY_USED:
Neha Bhende9bc7e312015-10-09 16:10:16 -06001174 vresult->u64 = svgascreen->hud.total_resource_bytes;
1175 break;
1176 case SVGA_QUERY_NUM_SHADERS:
1177 vresult->u64 = svga->hud.num_shaders;
1178 break;
1179 case SVGA_QUERY_NUM_RESOURCES:
1180 vresult->u64 = svgascreen->hud.num_resources;
1181 break;
1182 case SVGA_QUERY_NUM_STATE_OBJECTS:
Brian Paul464d6082016-04-15 15:30:34 -06001183 vresult->u64 = (svga->hud.num_blend_objects +
1184 svga->hud.num_depthstencil_objects +
1185 svga->hud.num_rasterizer_objects +
1186 svga->hud.num_sampler_objects +
1187 svga->hud.num_samplerview_objects +
1188 svga->hud.num_vertexelement_objects);
Neha Bhende9bc7e312015-10-09 16:10:16 -06001189 break;
1190 case SVGA_QUERY_NUM_SURFACE_VIEWS:
1191 vresult->u64 = svga->hud.num_surface_views;
Brian Paulac114c62013-04-03 10:23:57 -06001192 break;
Charmaine Lee78e628a2015-12-21 11:07:08 -08001193 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
1194 vresult->u64 = svga->hud.num_generate_mipmap;
1195 break;
Brian Paule3f5b8a2017-06-16 16:36:43 -06001196 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
1197 vresult->u64 = svgascreen->hud.num_failed_allocations;
1198 break;
Brian Paul49ed1f32013-04-01 17:49:31 -06001199 default:
1200 assert(!"unexpected query type in svga_get_query_result");
1201 }
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001202
Brian Paule0542512015-08-13 11:00:58 -07001203 SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, *((uint64_t *)vresult));
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001204
Brian Paule0542512015-08-13 11:00:58 -07001205 return ret;
1206}
1207
/**
 * Set (or clear) the conditional-rendering predicate for the context.
 *
 * \param q  the occlusion/predicate query to condition rendering on, or
 *           NULL to disable conditional rendering
 * \param condition  render when the query result matches this value
 * \param mode  whether to wait for the query result (PIPE_RENDER_COND_WAIT
 *              variants) before rendering
 *
 * VGPU10 only (asserted below).  Records the chosen predicate in
 * svga->pred so svga_toggle_render_condition() can re-issue it later.
 */
static void
svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
                      boolean condition, enum pipe_render_cond_flag mode)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq = svga_query(q);
   SVGA3dQueryId queryId;
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   assert(svga_have_vgpu10(svga));
   if (sq == NULL) {
      /* NULL query disables predication. */
      queryId = SVGA3D_INVALID_ID;
   }
   else {
      assert(sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION ||
             sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE);

      if (sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION) {
         assert(sq->predicate);
         /**
          * For conditional rendering, make sure to use the associated
          * predicate query.
          */
         sq = svga_query(sq->predicate);
      }
      queryId = sq->id;

      /* In WAIT modes, block on the query's fence before predicating. */
      if ((mode == PIPE_RENDER_COND_WAIT ||
           mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
         sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
                           SVGA_FENCE_FLAG_QUERY);
      }
   }
   /*
    * if the kernel module doesn't support the predication command,
    * we'll just render unconditionally.
    * This is probably acceptable for the typical case of occlusion culling.
    */
   if (sws->have_set_predication_cmd) {
      ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
                                         (uint32) condition);
      if (ret != PIPE_OK) {
         /* Command buffer was likely full; flush and retry once. */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
                                            (uint32) condition);
      }
      /* Remember the predicate so it can be toggled off/on later. */
      svga->pred.query_id = queryId;
      svga->pred.cond = condition;
   }

   svga->render_condition = (sq != NULL);
}
1263
1264
1265/*
1266 * This function is a workaround because we lack the ability to query
1267 * renderer's time synchornously.
1268 */
1269static uint64_t
1270svga_get_timestamp(struct pipe_context *pipe)
1271{
1272 struct pipe_query *q = svga_create_query(pipe, PIPE_QUERY_TIMESTAMP, 0);
1273 union pipe_query_result result;
1274
1275 svga_begin_query(pipe, q);
1276 svga_end_query(pipe,q);
1277 svga_get_query_result(pipe, q, TRUE, &result);
1278 svga_destroy_query(pipe, q);
1279
1280 return result.u64;
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001281}
1282
1283
/**
 * Called by the state tracker to pause/resume all active queries.
 * The svga driver has nothing to do here, but gallium requires the
 * hook to be non-NULL.
 */
static void
svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
{
   /* intentionally empty */
}
1288
1289
Thomas Hellstrom4c3e8f12017-04-12 10:38:23 +02001290/**
1291 * \brief Toggle conditional rendering if already enabled
1292 *
1293 * \param svga[in] The svga context
1294 * \param render_condition_enabled[in] Whether to ignore requests to turn
1295 * conditional rendering off
1296 * \param on[in] Whether to turn conditional rendering on or off
1297 */
1298void
1299svga_toggle_render_condition(struct svga_context *svga,
1300 boolean render_condition_enabled,
1301 boolean on)
1302{
1303 SVGA3dQueryId query_id;
1304 enum pipe_error ret;
1305
1306 if (render_condition_enabled ||
1307 svga->pred.query_id == SVGA3D_INVALID_ID) {
1308 return;
1309 }
1310
1311 /*
1312 * If we get here, it means that the system supports
1313 * conditional rendering since svga->pred.query_id has already been
1314 * modified for this context and thus support has already been
1315 * verified.
1316 */
1317 query_id = on ? svga->pred.query_id : SVGA3D_INVALID_ID;
1318
1319 ret = SVGA3D_vgpu10_SetPredication(svga->swc, query_id,
1320 (uint32) svga->pred.cond);
1321 if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
1322 svga_context_flush(svga, NULL);
1323 ret = SVGA3D_vgpu10_SetPredication(svga->swc, query_id,
1324 (uint32) svga->pred.cond);
1325 assert(ret == PIPE_OK);
1326 }
1327}
1328
1329
Brian Paulcecbfce2013-04-18 16:09:27 -06001330void
1331svga_init_query_functions(struct svga_context *svga)
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001332{
1333 svga->pipe.create_query = svga_create_query;
1334 svga->pipe.destroy_query = svga_destroy_query;
1335 svga->pipe.begin_query = svga_begin_query;
1336 svga->pipe.end_query = svga_end_query;
1337 svga->pipe.get_query_result = svga_get_query_result;
Marek Olšák26171bd2016-04-08 01:42:00 +02001338 svga->pipe.set_active_query_state = svga_set_active_query_state;
Brian Paule0542512015-08-13 11:00:58 -07001339 svga->pipe.render_condition = svga_render_condition;
1340 svga->pipe.get_timestamp = svga_get_timestamp;
Jakob Bornecrantz31926332009-11-16 19:56:18 +01001341}