/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include <stdarg.h>
#include "kmd/winsys.h"
#include "dispatch.h"
#include "gpu.h"
#include "queue.h"
#include "dev.h"

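/*
 * Create an intel_queue for each XGL_DEVICE_QUEUE_CREATE_INFO.  Each GPU
 * engine may back at most one queue, with a queueCount of exactly 1; on
 * failure, the queues created so far are destroyed again.
 */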
static XGL_RESULT dev_create_queues(struct intel_dev *dev,
                                    const XGL_DEVICE_QUEUE_CREATE_INFO *queues,
                                    XGL_UINT count)
{
    XGL_UINT i;

    if (!count)
        return XGL_ERROR_INVALID_POINTER;

    for (i = 0; i < count; i++) {
        const XGL_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
        XGL_RESULT ret = XGL_SUCCESS;

        if (q->queueNodeIndex < INTEL_GPU_ENGINE_COUNT &&
            q->queueCount == 1 && !dev->queues[q->queueNodeIndex]) {
            ret = intel_queue_create(dev, q->queueNodeIndex,
                    &dev->queues[q->queueNodeIndex]);
        }
        else {
            ret = XGL_ERROR_INVALID_POINTER;
        }

        if (ret != XGL_SUCCESS) {
            XGL_UINT j;
            for (j = 0; j < i; j++)
                intel_queue_destroy(dev->queues[j]);

            return ret;
        }
    }

    return XGL_SUCCESS;
}

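/*
 * Create an intel_dev from an intel_gpu: allocate the base object, record
 * the enabled extensions, open the GPU winsys, allocate the command buffer
 * scratch bo, and create the requested queues.  Any failure tears the
 * partially constructed device back down.
 */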
XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
                            const XGL_DEVICE_CREATE_INFO *info,
                            struct intel_dev **dev_ret)
{
    struct intel_dev *dev;
    XGL_UINT i;
    XGL_RESULT ret;

    if (gpu->winsys)
        return XGL_ERROR_DEVICE_ALREADY_CREATED;

    dev = (struct intel_dev *) intel_base_create(NULL, sizeof(*dev),
            info->flags & XGL_DEVICE_CREATE_VALIDATION_BIT,
            XGL_DBG_OBJECT_DEVICE, info, sizeof(struct intel_dev_dbg));
    if (!dev)
        return XGL_ERROR_OUT_OF_MEMORY;

    /* set early so that intel_dev_destroy() is always safe to call below */
    dev->gpu = gpu;

    for (i = 0; i < info->extensionCount; i++) {
        const enum intel_ext_type ext = intel_gpu_lookup_extension(gpu,
                (const char *) info->ppEnabledExtensionNames[i]);

        if (ext == INTEL_EXT_INVALID) {
            /* destroy the partially created device instead of leaking it */
            intel_dev_destroy(dev);
            return XGL_ERROR_INVALID_EXTENSION;
        }

        dev->exts[ext] = true;
    }

    ret = intel_gpu_open(gpu);
    if (ret != XGL_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    dev->winsys = gpu->winsys;

    dev->cmd_scratch_bo = intel_winsys_alloc_buffer(dev->winsys,
            "command buffer scratch", 4096, false);
    if (!dev->cmd_scratch_bo) {
        intel_dev_destroy(dev);
        return XGL_ERROR_OUT_OF_GPU_MEMORY;
    }

    ret = dev_create_queues(dev, info->pRequestedQueues,
            info->queueRecordCount);
    if (ret != XGL_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    *dev_ret = dev;

    return XGL_SUCCESS;
}

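/* Free every debug message filter attached to the device. */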
static void dev_clear_msg_filters(struct intel_dev *dev)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *filter;

    filter = dbg->filters;
    while (filter) {
        struct intel_dev_dbg_msg_filter *next = filter->next;
        icd_free(filter);
        filter = next;
    }

    dbg->filters = NULL;
}

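/*
 * Destroy an intel_dev: release the message filters, queues, and scratch
 * bo, free the base object, and close the GPU if it had been opened.
 */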
void intel_dev_destroy(struct intel_dev *dev)
{
    struct intel_gpu *gpu = dev->gpu;
    XGL_UINT i;

    if (dev->base.dbg)
        dev_clear_msg_filters(dev);

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i])
            intel_queue_destroy(dev->queues[i]);
    }

    if (dev->cmd_scratch_bo)
        intel_bo_unreference(dev->cmd_scratch_bo);

    intel_base_destroy(&dev->base);

    /* dev is freed by intel_base_destroy(); use the saved gpu pointer */
    if (gpu->winsys)
        intel_gpu_close(gpu);
}

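/*
 * Describe the single memory heap exposed by the driver.  The heap size is
 * still a placeholder and should eventually report the system memory size.
 */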
void intel_dev_get_heap_props(const struct intel_dev *dev,
                              XGL_MEMORY_HEAP_PROPERTIES *props)
{
    props->structSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);

    props->heapMemoryType = XGL_HEAP_MEMORY_LOCAL;

    props->heapSize = 0xffffffff; /* TODO system memory size */

    props->pageSize = 4096;
    props->flags = XGL_MEMORY_HEAP_CPU_VISIBLE_BIT |
                   XGL_MEMORY_HEAP_CPU_GPU_COHERENT_BIT |
                   XGL_MEMORY_HEAP_CPU_WRITE_COMBINED_BIT |
                   XGL_MEMORY_HEAP_HOLDS_PINNED_BIT |
                   XGL_MEMORY_HEAP_SHAREABLE_BIT;

    props->gpuReadPerfRating = 100.0f;
    props->gpuWritePerfRating = 100.0f;
    props->cpuReadPerfRating = 10.0f;
    props->cpuWritePerfRating = 80.0f;
}

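/*
 * Install or update the filter for msg_code.  Changing an existing filter
 * resets its triggered state.
 */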
XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                    XGL_INT msg_code,
                                    XGL_DBG_MSG_FILTER filter)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *f = dbg->filters;

    assert(filter != XGL_DBG_MSG_FILTER_NONE);

    while (f) {
        if (f->msg_code == msg_code)
            break;
        f = f->next;
    }

    if (f) {
        if (f->filter != filter) {
            f->filter = filter;
            f->triggered = false;
        }
    } else {
        f = icd_alloc(sizeof(*f), 0, XGL_SYSTEM_ALLOC_DEBUG);
        if (!f)
            return XGL_ERROR_OUT_OF_MEMORY;

        f->msg_code = msg_code;
        f->filter = filter;
        f->triggered = false;

        f->next = dbg->filters;
        dbg->filters = f;
    }

    return XGL_SUCCESS;
}

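/* Remove the filter for msg_code, if one is installed. */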
void intel_dev_remove_msg_filter(struct intel_dev *dev,
                                 XGL_INT msg_code)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *f = dbg->filters, *prev = NULL;

    while (f) {
        if (f->msg_code == msg_code) {
            if (prev)
                prev->next = f->next;
            else
                dbg->filters = f->next;

            icd_free(f);
            break;
        }

        prev = f;
        f = f->next;
    }
}

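/*
 * Return true if the message should be suppressed: always for
 * XGL_DBG_MSG_FILTER_ALL, and after the first occurrence for
 * XGL_DBG_MSG_FILTER_REPEATED.
 */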
static bool dev_filter_msg(struct intel_dev *dev,
                           XGL_INT msg_code)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *filter;

    if (!dbg)
        return false;

    filter = dbg->filters;
    while (filter) {
        if (filter->msg_code != msg_code) {
            filter = filter->next;
            continue;
        }

        if (filter->filter == XGL_DBG_MSG_FILTER_ALL)
            return true;

        if (filter->filter == XGL_DBG_MSG_FILTER_REPEATED &&
            filter->triggered)
            return true;

        filter->triggered = true;
        break;
    }

    return false;
}

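/* Log a driver message through the ICD unless a filter suppresses it. */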
void intel_dev_log(struct intel_dev *dev,
                   XGL_DBG_MSG_TYPE msg_type,
                   XGL_VALIDATION_LEVEL validation_level,
                   struct intel_base *src_object,
                   XGL_SIZE location,
                   XGL_INT msg_code,
                   const char *format, ...)
{
    va_list ap;

    if (dev_filter_msg(dev, msg_code))
        return;

    va_start(ap, format);
    icd_vlog(msg_type, validation_level, (XGL_BASE_OBJECT) src_object,
            location, msg_code, format, ap);
    va_end(ap);
}

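/* XGL API entry points dispatched to this driver */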
XGL_RESULT XGLAPI intelCreateDevice(
    XGL_PHYSICAL_GPU gpu_,
    const XGL_DEVICE_CREATE_INFO* pCreateInfo,
    XGL_DEVICE* pDevice)
{
    struct intel_gpu *gpu = intel_gpu(gpu_);

    return intel_dev_create(gpu, pCreateInfo, (struct intel_dev **) pDevice);
}

XGL_RESULT XGLAPI intelDestroyDevice(
    XGL_DEVICE device)
{
    struct intel_dev *dev = intel_dev(device);

    intel_dev_destroy(dev);

    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelGetMemoryHeapCount(
    XGL_DEVICE device,
    XGL_UINT* pCount)
{
    *pCount = 1;
    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelGetMemoryHeapInfo(
    XGL_DEVICE device,
    XGL_UINT heapId,
    XGL_MEMORY_HEAP_INFO_TYPE infoType,
    XGL_SIZE* pDataSize,
    XGL_VOID* pData)
{
    struct intel_dev *dev = intel_dev(device);

    intel_dev_get_heap_props(dev, pData);
    *pDataSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);

    return XGL_SUCCESS;
}

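/*
 * Graphics and compute share the single 3D engine queue; only queue
 * index 0 is available and DMA queues are not supported.
 */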
XGL_RESULT XGLAPI intelGetDeviceQueue(
    XGL_DEVICE device,
    XGL_QUEUE_TYPE queueType,
    XGL_UINT queueIndex,
    XGL_QUEUE* pQueue)
{
    struct intel_dev *dev = intel_dev(device);

    switch (queueType) {
    case XGL_QUEUE_TYPE_GRAPHICS:
    case XGL_QUEUE_TYPE_COMPUTE:
        if (queueIndex > 0)
            return XGL_ERROR_UNAVAILABLE;
        *pQueue = dev->queues[INTEL_GPU_ENGINE_3D];
        return XGL_SUCCESS;
    case XGL_QUEUE_TYPE_DMA:
    default:
        return XGL_ERROR_UNAVAILABLE;
    }
}

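/* Wait for every queue to idle; the last failing wait is reported. */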
XGL_RESULT XGLAPI intelDeviceWaitIdle(
    XGL_DEVICE device)
{
    struct intel_dev *dev = intel_dev(device);
    XGL_RESULT ret = XGL_SUCCESS;
    XGL_UINT i;

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i]) {
            const XGL_RESULT r = intel_queue_wait(dev->queues[i], -1);
            if (r != XGL_SUCCESS)
                ret = r;
        }
    }

    return ret;
}

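/*
 * The DbgSet* entry points are effectively no-ops when the device was
 * created without validation, since no intel_dev_dbg is attached.
 */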
XGL_RESULT XGLAPI intelDbgSetValidationLevel(
    XGL_DEVICE device,
    XGL_VALIDATION_LEVEL validationLevel)
{
    struct intel_dev *dev = intel_dev(device);
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);

    if (dbg)
        dbg->validation_level = validationLevel;

    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelDbgSetMessageFilter(
    XGL_DEVICE device,
    XGL_INT msgCode,
    XGL_DBG_MSG_FILTER filter)
{
    struct intel_dev *dev = intel_dev(device);

    if (!dev->base.dbg)
        return XGL_SUCCESS;

    if (filter == XGL_DBG_MSG_FILTER_NONE) {
        intel_dev_remove_msg_filter(dev, msgCode);
        return XGL_SUCCESS;
    }

    return intel_dev_add_msg_filter(dev, msgCode, filter);
}

XGL_RESULT XGLAPI intelDbgSetDeviceOption(
    XGL_DEVICE device,
    XGL_DBG_DEVICE_OPTION dbgOption,
    XGL_SIZE dataSize,
    const XGL_VOID* pData)
{
    struct intel_dev *dev = intel_dev(device);
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    XGL_RESULT ret = XGL_SUCCESS;

    if (dataSize == 0)
        return XGL_ERROR_INVALID_VALUE;

    switch (dbgOption) {
    case XGL_DBG_OPTION_DISABLE_PIPELINE_LOADS:
        if (dbg)
            dbg->disable_pipeline_loads = *((const bool *) pData);
        break;
    case XGL_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS:
        if (dbg)
            dbg->force_object_memory_reqs = *((const bool *) pData);
        break;
    case XGL_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT:
        if (dbg)
            dbg->force_large_image_alignment = *((const bool *) pData);
        break;
    default:
        ret = XGL_ERROR_INVALID_VALUE;
        break;
    }

    return ret;
}