/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 */

#include <stdarg.h>
#include "kmd/winsys.h"
#include "dispatch.h"
#include "gpu.h"
#include "queue.h"
#include "dev.h"

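/*
 * Create an intel_queue for each entry of the XGL_DEVICE_QUEUE_CREATE_INFO
 * array.  Each request must name a valid engine, ask for exactly one queue,
 * and not target an engine that already has a queue.
 */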
static XGL_RESULT dev_create_queues(struct intel_dev *dev,
                                    const XGL_DEVICE_QUEUE_CREATE_INFO *queues,
                                    XGL_UINT count)
{
    XGL_UINT i;

    if (!count)
        return XGL_ERROR_INVALID_POINTER;

    for (i = 0; i < count; i++) {
        const XGL_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
        XGL_RESULT ret = XGL_SUCCESS;

        if (q->queueNodeIndex < INTEL_GPU_ENGINE_COUNT &&
            q->queueCount == 1 && !dev->queues[q->queueNodeIndex]) {
            ret = intel_queue_create(dev, q->queueNodeIndex,
                    &dev->queues[q->queueNodeIndex]);
        }
        else {
            ret = XGL_ERROR_INVALID_POINTER;
        }

        if (ret != XGL_SUCCESS) {
            XGL_UINT j;

            /* destroy any queue created so far and clear the slots so
               that intel_dev_destroy() does not destroy them again */
            for (j = 0; j < ARRAY_SIZE(dev->queues); j++) {
                if (dev->queues[j]) {
                    intel_queue_destroy(dev->queues[j]);
                    dev->queues[j] = NULL;
                }
            }

            return ret;
        }
    }

    return XGL_SUCCESS;
}

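/*
 * Create the logical device: validate and enable the requested extensions,
 * open the GPU, create the winsys, allocate the command buffer scratch bo,
 * and create the requested queues.  On failure the partially constructed
 * device is destroyed.
 */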
XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
                            const XGL_DEVICE_CREATE_INFO *info,
                            struct intel_dev **dev_ret)
{
    struct intel_dev *dev;
    XGL_UINT i;
    XGL_RESULT ret;

    if (gpu->device_fd >= 0)
        return XGL_ERROR_DEVICE_ALREADY_CREATED;

    dev = (struct intel_dev *) intel_base_create(NULL, sizeof(*dev),
            info->flags & XGL_DEVICE_CREATE_VALIDATION_BIT,
            XGL_DBG_OBJECT_DEVICE, info, sizeof(struct intel_dev_dbg));
    if (!dev)
        return XGL_ERROR_OUT_OF_MEMORY;

    dev->gpu = gpu;

    for (i = 0; i < info->extensionCount; i++) {
        const enum intel_ext_type ext = intel_gpu_lookup_extension(gpu,
                (const char *) info->ppEnabledExtensionNames[i]);

        if (ext == INTEL_EXT_INVALID) {
            /* destroy the partially constructed device instead of leaking it */
            intel_dev_destroy(dev);
            return XGL_ERROR_INVALID_EXTENSION;
        }

        dev->exts[ext] = true;
    }

    ret = intel_gpu_open(gpu);
    if (ret != XGL_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    dev->winsys = intel_winsys_create_for_fd(gpu->device_fd);
    if (!dev->winsys) {
        icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
                0, 0, "failed to create device winsys");
        intel_dev_destroy(dev);
        return XGL_ERROR_UNKNOWN;
    }

    dev->cmd_scratch_bo = intel_winsys_alloc_buffer(dev->winsys,
            "command buffer scratch", 4096, false);
    if (!dev->cmd_scratch_bo) {
        intel_dev_destroy(dev);
        return XGL_ERROR_OUT_OF_GPU_MEMORY;
    }

    ret = dev_create_queues(dev, info->pRequestedQueues,
            info->queueRecordCount);
    if (ret != XGL_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    *dev_ret = dev;

    return XGL_SUCCESS;
}

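/* free all debug message filters attached to the device */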
static void dev_clear_msg_filters(struct intel_dev *dev)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *filter;

    filter = dbg->filters;
    while (filter) {
        struct intel_dev_dbg_msg_filter *next = filter->next;
        icd_free(filter);
        filter = next;
    }

    dbg->filters = NULL;
}

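/*
 * Destroy the device and everything it owns, in roughly the reverse order
 * of creation.
 */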
void intel_dev_destroy(struct intel_dev *dev)
{
    XGL_UINT i;

    if (dev->base.dbg)
        dev_clear_msg_filters(dev);

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i])
            intel_queue_destroy(dev->queues[i]);
    }

    if (dev->cmd_scratch_bo)
        intel_bo_unreference(dev->cmd_scratch_bo);

    if (dev->winsys)
        intel_winsys_destroy(dev->winsys);

    if (dev->gpu->device_fd >= 0)
        intel_gpu_close(dev->gpu);

    intel_base_destroy(&dev->base);
}

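/* describe the single CPU-visible, GPU-local memory heap exposed by the driver */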
void intel_dev_get_heap_props(const struct intel_dev *dev,
                              XGL_MEMORY_HEAP_PROPERTIES *props)
{
    props->structSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);

    props->heapMemoryType = XGL_HEAP_MEMORY_LOCAL;

    props->heapSize = 0xffffffff; /* TODO system memory size */

    props->pageSize = 4096;
    props->flags = XGL_MEMORY_HEAP_CPU_VISIBLE_BIT |
                   XGL_MEMORY_HEAP_CPU_GPU_COHERENT_BIT |
                   XGL_MEMORY_HEAP_CPU_WRITE_COMBINED_BIT |
                   XGL_MEMORY_HEAP_HOLDS_PINNED_BIT |
                   XGL_MEMORY_HEAP_SHAREABLE_BIT;

    props->gpuReadPerfRating = 100.0f;
    props->gpuWritePerfRating = 100.0f;
    props->cpuReadPerfRating = 10.0f;
    props->cpuWritePerfRating = 80.0f;
}

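/*
 * Add or update the debug message filter for msg_code.  Updating an existing
 * filter resets its triggered state.
 */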
XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                    XGL_INT msg_code,
                                    XGL_DBG_MSG_FILTER filter)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *f = dbg->filters;

    assert(filter != XGL_DBG_MSG_FILTER_NONE);

    while (f) {
        if (f->msg_code == msg_code)
            break;
        f = f->next;
    }

    if (f) {
        if (f->filter != filter) {
            f->filter = filter;
            f->triggered = false;
        }
    } else {
        f = icd_alloc(sizeof(*f), 0, XGL_SYSTEM_ALLOC_DEBUG);
        if (!f)
            return XGL_ERROR_OUT_OF_MEMORY;

        f->msg_code = msg_code;
        f->filter = filter;
        f->triggered = false;

        f->next = dbg->filters;
        dbg->filters = f;
    }

    return XGL_SUCCESS;
}

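/* remove the debug message filter for msg_code, if one is installed */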
void intel_dev_remove_msg_filter(struct intel_dev *dev,
                                 XGL_INT msg_code)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *f = dbg->filters, *prev = NULL;

    while (f) {
        if (f->msg_code == msg_code) {
            if (prev)
                prev->next = f->next;
            else
                dbg->filters = f->next;

            icd_free(f);
            break;
        }

        prev = f;
        f = f->next;
    }
}

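/*
 * Return true if a message with msg_code should be suppressed according to
 * the filters installed on the device.
 */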
static bool dev_filter_msg(struct intel_dev *dev,
                           XGL_INT msg_code)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *filter;

    if (!dbg)
        return false;

    filter = dbg->filters;
    while (filter) {
        if (filter->msg_code != msg_code) {
            filter = filter->next;
            continue;
        }

        if (filter->filter == XGL_DBG_MSG_FILTER_ALL)
            return true;

        if (filter->filter == XGL_DBG_MSG_FILTER_REPEATED &&
            filter->triggered)
            return true;

        filter->triggered = true;
        break;
    }

    return false;
}

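/* log a message through the ICD logger unless a filter suppresses it */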
void intel_dev_log(struct intel_dev *dev,
                   XGL_DBG_MSG_TYPE msg_type,
                   XGL_VALIDATION_LEVEL validation_level,
                   struct intel_base *src_object,
                   XGL_SIZE location,
                   XGL_INT msg_code,
                   const char *format, ...)
{
    va_list ap;

    if (dev_filter_msg(dev, msg_code))
        return;

    va_start(ap, format);
    icd_vlog(msg_type, validation_level, (XGL_BASE_OBJECT) src_object,
             location, msg_code, format, ap);
    va_end(ap);
}

XGL_RESULT XGLAPI intelCreateDevice(
    XGL_PHYSICAL_GPU gpu_,
    const XGL_DEVICE_CREATE_INFO* pCreateInfo,
    XGL_DEVICE* pDevice)
{
    struct intel_gpu *gpu = intel_gpu(gpu_);

    return intel_dev_create(gpu, pCreateInfo, (struct intel_dev **) pDevice);
}

XGL_RESULT XGLAPI intelDestroyDevice(
    XGL_DEVICE device)
{
    struct intel_dev *dev = intel_dev(device);

    intel_dev_destroy(dev);

    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelGetMemoryHeapCount(
    XGL_DEVICE device,
    XGL_UINT* pCount)
{
    *pCount = 1;
    return XGL_SUCCESS;
}

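/* there is a single heap; heapId and infoType are not examined */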
XGL_RESULT XGLAPI intelGetMemoryHeapInfo(
    XGL_DEVICE device,
    XGL_UINT heapId,
    XGL_MEMORY_HEAP_INFO_TYPE infoType,
    XGL_SIZE* pDataSize,
    XGL_VOID* pData)
{
    struct intel_dev *dev = intel_dev(device);

    intel_dev_get_heap_props(dev, pData);
    *pDataSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);

    return XGL_SUCCESS;
}

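/*
 * Graphics and compute queues both map to the single 3D engine queue; DMA
 * queues are not supported.
 */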
XGL_RESULT XGLAPI intelGetDeviceQueue(
    XGL_DEVICE device,
    XGL_QUEUE_TYPE queueType,
    XGL_UINT queueIndex,
    XGL_QUEUE* pQueue)
{
    struct intel_dev *dev = intel_dev(device);

    switch (queueType) {
    case XGL_QUEUE_TYPE_GRAPHICS:
    case XGL_QUEUE_TYPE_COMPUTE:
        if (queueIndex > 0)
            return XGL_ERROR_UNAVAILABLE;
        *pQueue = dev->queues[INTEL_GPU_ENGINE_3D];
        return XGL_SUCCESS;
    case XGL_QUEUE_TYPE_DMA:
    default:
        return XGL_ERROR_UNAVAILABLE;
    }
}

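/* wait for every queue to go idle; the last failure, if any, is returned */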
XGL_RESULT XGLAPI intelDeviceWaitIdle(
    XGL_DEVICE device)
{
    struct intel_dev *dev = intel_dev(device);
    XGL_RESULT ret = XGL_SUCCESS;
    XGL_UINT i;

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i]) {
            const XGL_RESULT r = intel_queue_wait(dev->queues[i], -1);
            if (r != XGL_SUCCESS)
                ret = r;
        }
    }

    return ret;
}

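/*
 * The Dbg* entrypoints below only take effect when the device has a debug
 * object, i.e. it was created with XGL_DEVICE_CREATE_VALIDATION_BIT set.
 */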
XGL_RESULT XGLAPI intelDbgSetValidationLevel(
    XGL_DEVICE device,
    XGL_VALIDATION_LEVEL validationLevel)
{
    struct intel_dev *dev = intel_dev(device);
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);

    if (dbg)
        dbg->validation_level = validationLevel;

    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelDbgSetMessageFilter(
    XGL_DEVICE device,
    XGL_INT msgCode,
    XGL_DBG_MSG_FILTER filter)
{
    struct intel_dev *dev = intel_dev(device);

    if (!dev->base.dbg)
        return XGL_SUCCESS;

    if (filter == XGL_DBG_MSG_FILTER_NONE) {
        intel_dev_remove_msg_filter(dev, msgCode);
        return XGL_SUCCESS;
    }

    return intel_dev_add_msg_filter(dev, msgCode, filter);
}

XGL_RESULT XGLAPI intelDbgSetDeviceOption(
    XGL_DEVICE device,
    XGL_DBG_DEVICE_OPTION dbgOption,
    XGL_SIZE dataSize,
    const XGL_VOID* pData)
{
    struct intel_dev *dev = intel_dev(device);
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    XGL_RESULT ret = XGL_SUCCESS;

    if (dataSize == 0)
        return XGL_ERROR_INVALID_VALUE;

    switch (dbgOption) {
    case XGL_DBG_OPTION_DISABLE_PIPELINE_LOADS:
        if (dbg)
            dbg->disable_pipeline_loads = *((const bool *) pData);
        break;
    case XGL_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS:
        if (dbg)
            dbg->force_object_memory_reqs = *((const bool *) pData);
        break;
    case XGL_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT:
        if (dbg)
            dbg->force_large_image_alignment = *((const bool *) pData);
        break;
    default:
        ret = XGL_ERROR_INVALID_VALUE;
        break;
    }

    return ret;
}