blob: ed79469b69aa8ccfbae7f97530a82312dd191a85 [file] [log] [blame]
Chia-I Wue54854a2014-08-05 10:23:50 +08001/*
2 * XGL
3 *
4 * Copyright (C) 2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "kmd/winsys.h"
26#include "dispatch_tables.h"
27#include "gpu.h"
28#include "dev.h"
29
30static struct intel_queue *queue_create(struct intel_dev *dev,
31 XGL_QUEUE_TYPE type)
32{
33 struct intel_queue *queue;
34
35 queue = icd_alloc(sizeof(*queue), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
36 if (!queue)
37 return NULL;
38
39 memset(queue, 0, sizeof(*queue));
40 queue->dev = dev;
41
42 queue->base.dispatch = dev->base.dispatch;
43 if (dev->base.dbg) {
44 queue->base.dbg =
45 intel_base_dbg_create(XGL_DBG_OBJECT_QUEUE, NULL, 0);
46 if (!queue->base.dbg) {
47 icd_free(queue);
48 return NULL;
49 }
50 }
51
52 return queue;
53}
54
55static void queue_destroy(struct intel_queue *queue)
56{
57 if (queue->base.dbg)
58 intel_base_dbg_destroy(queue->base.dbg);
59 icd_free(queue);
60}
61
Chia-I Wu49dbee82014-08-06 12:48:47 +080062static XGL_RESULT queue_wait(struct intel_queue *queue, int64_t timeout)
63{
64 struct intel_bo *bo = queue->last_submitted_bo;
65
66 return (!bo || intel_bo_wait(bo, timeout) == 0) ?
67 XGL_SUCCESS : XGL_ERROR_UNKNOWN;
68}
69
Chia-I Wue54854a2014-08-05 10:23:50 +080070static struct intel_dev_dbg *dev_dbg_create(const XGL_DEVICE_CREATE_INFO *info)
71{
72 struct intel_dev_dbg *dbg;
73
74 dbg = icd_alloc(sizeof(*dbg), 0, XGL_SYSTEM_ALLOC_DEBUG);
75 if (!dbg)
76 return NULL;
77
78 memset(dbg, 0, sizeof(*dbg));
79
80 if (!intel_base_dbg_init(&dbg->base, XGL_DBG_OBJECT_DEVICE,
81 info, sizeof(*info))) {
82 icd_free(dbg);
83 return NULL;
84 }
85
86 return dbg;
87}
88
89static void dev_dbg_destroy(struct intel_dev_dbg *dbg)
90{
91 struct intel_dev_dbg_msg_filter *filter;
92
93 filter = dbg->filters;
94 while (filter) {
95 struct intel_dev_dbg_msg_filter *next = filter->next;
96 icd_free(filter);
97 filter = next;
98 }
99
100 intel_base_dbg_cleanup(&dbg->base);
101 icd_free(dbg);
102}
103
104static XGL_RESULT dev_create_queues(struct intel_dev *dev,
105 const XGL_DEVICE_QUEUE_CREATE_INFO *queues,
106 XGL_UINT count)
107{
108 XGL_UINT i;
109
110 if (!count)
111 return XGL_ERROR_INVALID_POINTER;
112
113 for (i = 0; i < count; i++) {
114 const XGL_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
115 XGL_RESULT ret = XGL_SUCCESS;
116
117 if (q->queueNodeIndex >= INTEL_GPU_ENGINE_COUNT ||
118 q->queueCount > 1 ||
119 dev->queues[q->queueNodeIndex]) {
120 ret = XGL_ERROR_INVALID_POINTER;
121 }
122 else {
123 dev->queues[q->queueNodeIndex] =
124 queue_create(dev, q->queueNodeIndex);
125 if (!dev->queues[q->queueNodeIndex])
126 ret = XGL_ERROR_OUT_OF_MEMORY;
127 }
128
129 if (ret != XGL_SUCCESS) {
130 XGL_UINT j;
131 for (j = 0; j < i; j++)
132 queue_destroy(dev->queues[j]);
133
134 return ret;
135 }
136 }
137
138 return XGL_SUCCESS;
139}
140
141XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
142 const XGL_DEVICE_CREATE_INFO *info,
143 struct intel_dev **dev_ret)
144{
145 const struct icd_dispatch_table *dispatch;
146 struct intel_dev_dbg *dbg;
147 struct intel_dev *dev;
148 XGL_RESULT ret;
149
150 if (info->extensionCount)
151 return XGL_ERROR_INVALID_EXTENSION;
152
153 if (gpu->fd >= 0)
154 return XGL_ERROR_DEVICE_ALREADY_CREATED;
155
156 dev = icd_alloc(sizeof(*dev), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
157 if (!dev)
158 return XGL_ERROR_OUT_OF_MEMORY;
159
160 memset(dev, 0, sizeof(*dev));
161 dev->gpu = gpu;
162
163 ret = intel_gpu_open(gpu);
164 if (ret != XGL_SUCCESS) {
165 intel_dev_destroy(dev);
166 return ret;
167 }
168
169 dev->winsys = intel_winsys_create_for_fd(gpu->fd);
170 if (!dev->winsys) {
171 icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
172 0, 0, "failed to create device winsys for %s", gpu->path);
173 intel_dev_destroy(dev);
174 return XGL_ERROR_UNKNOWN;
175 }
176
177 ret = dev_create_queues(dev, info->pRequestedQueues,
178 info->queueRecordCount);
179 if (ret != XGL_SUCCESS) {
180 intel_dev_destroy(dev);
181 return ret;
182 }
183
184 if (info->flags & XGL_DEVICE_CREATE_VALIDATION_BIT) {
185 dispatch = &intel_debug_dispatch_table;
186 dbg = dev_dbg_create(info);
187
188 if (!dbg) {
189 icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0,
190 XGL_NULL_HANDLE, 0, 0,
191 "failed to create device debug layer for %s", gpu->path);
192 return XGL_ERROR_OUT_OF_MEMORY;
193 }
194 } else {
195 dispatch = &intel_normal_dispatch_table;
196 dbg = NULL;
197 }
198
199 dev->base.dispatch = dispatch;
200 dev->base.dbg = &dbg->base;
201
202 *dev_ret = dev;
203
204 return XGL_SUCCESS;
205}
206
/*
 * Destroy \p dev, releasing everything intel_dev_create() acquired:
 * debug state, queues, winsys, and the gpu fd, in that order.  Safe on
 * a partially constructed device (each member is checked before use).
 */
void intel_dev_destroy(struct intel_dev *dev)
{
    XGL_UINT i;

    if (dev->base.dbg)
        dev_dbg_destroy((struct intel_dev_dbg *) dev->base.dbg);

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i])
            queue_destroy(dev->queues[i]);
    }

    if (dev->winsys)
        intel_winsys_destroy(dev->winsys);

    /* closing the fd also marks the gpu as having no device (see
       intel_dev_create's gpu->fd >= 0 check) */
    if (dev->gpu->fd >= 0)
        intel_gpu_close(dev->gpu);

    icd_free(dev);
}
227
228void intel_dev_get_heap_props(const struct intel_dev *dev,
229 XGL_MEMORY_HEAP_PROPERTIES *props)
230{
231 props->structSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);
232
233 props->heapMemoryType = XGL_HEAP_MEMORY_LOCAL;
234
235 props->heapSize = 0xffffffff; /* TODO system memory size */
236
237 props->pageSize = 4096;
238 props->flags = XGL_MEMORY_HEAP_CPU_VISIBLE_BIT |
239 XGL_MEMORY_HEAP_CPU_GPU_COHERENT_BIT |
240 XGL_MEMORY_HEAP_CPU_WRITE_COMBINED_BIT |
241 XGL_MEMORY_HEAP_HOLDS_PINNED_BIT |
242 XGL_MEMORY_HEAP_SHAREABLE_BIT;
243
244 props->gpuReadPerfRating = 100.0f;
245 props->gpuWritePerfRating = 100.0f;
246 props->cpuReadPerfRating = 10.0f;
247 props->cpuWritePerfRating = 80.0f;
248}
249
250XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
251 XGL_INT msg_code,
252 XGL_DBG_MSG_FILTER filter)
253{
254 struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
255 struct intel_dev_dbg_msg_filter *f = dbg->filters;
256
257 assert(filter != XGL_DBG_MSG_FILTER_NONE);
258
259 while (f) {
260 if (f->msg_code == msg_code)
261 break;
262 f = f->next;
263 }
264
265 if (f) {
266 if (f->filter != filter) {
267 f->filter = filter;
268 f->triggered = false;
269 }
270 } else {
271 f = icd_alloc(sizeof(*f), 0, XGL_SYSTEM_ALLOC_DEBUG);
272 if (!f)
273 return XGL_ERROR_OUT_OF_MEMORY;
274
275 f->msg_code = msg_code;
276 f->filter = filter;
277 f->triggered = false;
278
279 f->next = dbg->filters;
280 dbg->filters = f;
281 }
282
283 return XGL_SUCCESS;
284}
285
286void intel_dev_remove_msg_filter(struct intel_dev *dev,
287 XGL_INT msg_code)
288{
289 struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
290 struct intel_dev_dbg_msg_filter *f = dbg->filters, *prev = NULL;
291
292 while (f) {
293 if (f->msg_code == msg_code) {
294 if (prev)
295 prev->next = f->next;
296 else
297 dbg->filters = f->next;
298
299 icd_free(f);
300 break;
301 }
302
303 prev = f;
304 f = f->next;
305 }
306}
Chia-I Wua207aba2014-08-05 15:13:37 +0800307
308XGL_RESULT XGLAPI intelCreateDevice(
309 XGL_PHYSICAL_GPU gpu_,
310 const XGL_DEVICE_CREATE_INFO* pCreateInfo,
311 XGL_DEVICE* pDevice)
312{
313 struct intel_gpu *gpu = intel_gpu(gpu_);
314
315 return intel_dev_create(gpu, pCreateInfo, (struct intel_dev **) pDevice);
316}
317
318XGL_RESULT XGLAPI intelDestroyDevice(
319 XGL_DEVICE device)
320{
321 struct intel_dev *dev = intel_dev(device);
322
323 intel_dev_destroy(dev);
324
325 return XGL_SUCCESS;
326}
327
328XGL_RESULT XGLAPI intelGetMemoryHeapCount(
329 XGL_DEVICE device,
330 XGL_UINT* pCount)
331{
332 *pCount = 1;
333 return XGL_SUCCESS;
334}
335
336XGL_RESULT XGLAPI intelGetMemoryHeapInfo(
337 XGL_DEVICE device,
338 XGL_UINT heapId,
339 XGL_MEMORY_HEAP_INFO_TYPE infoType,
340 XGL_SIZE* pDataSize,
341 XGL_VOID* pData)
342{
343 struct intel_dev *dev = intel_dev(device);
344
345 intel_dev_get_heap_props(dev, pData);
346 *pDataSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);
347
348 return XGL_SUCCESS;
349}
Chia-I Wu49dbee82014-08-06 12:48:47 +0800350
351XGL_RESULT XGLAPI intelGetDeviceQueue(
352 XGL_DEVICE device,
353 XGL_QUEUE_TYPE queueType,
354 XGL_UINT queueIndex,
355 XGL_QUEUE* pQueue)
356{
357 struct intel_dev *dev = intel_dev(device);
358
359 switch (queueType) {
360 case XGL_QUEUE_TYPE_GRAPHICS:
361 case XGL_QUEUE_TYPE_COMPUTE:
362 if (queueIndex > 0)
363 return XGL_ERROR_UNAVAILABLE;
364 *pQueue = dev->queues[INTEL_GPU_ENGINE_3D];
365 return XGL_SUCCESS;
366 case XGL_QUEUE_TYPE_DMA:
367 default:
368 return XGL_ERROR_UNAVAILABLE;
369 }
370}
371
372XGL_RESULT XGLAPI intelQueueSetGlobalMemReferences(
373 XGL_QUEUE queue,
374 XGL_UINT memRefCount,
375 const XGL_MEMORY_REF* pMemRefs)
376{
377 /*
378 * The winwys maintains the list of memory references. These are ignored
379 * until we move away from the winsys.
380 */
381 return XGL_SUCCESS;
382}
383
384XGL_RESULT XGLAPI intelQueueWaitIdle(
385 XGL_QUEUE queue_)
386{
387 struct intel_queue *queue = intel_queue(queue_);
388
389 return queue_wait(queue, -1);
390}
391
392XGL_RESULT XGLAPI intelDeviceWaitIdle(
393 XGL_DEVICE device)
394{
395 struct intel_dev *dev = intel_dev(device);
396 XGL_RESULT ret = XGL_SUCCESS;
397 XGL_UINT i;
398
399 for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
400 if (dev->queues[i]) {
401 const XGL_RESULT r = queue_wait(dev->queues[i], -1);
402 if (r != XGL_SUCCESS)
403 ret = r;
404 }
405 }
406
407 return ret;
408}