/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "kmd/winsys.h"
#include "dispatch_tables.h"
#include "gpu.h"
#include "queue.h"
#include "dev.h"
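
/*
 * Allocate the per-device debug-layer state and initialize its common
 * intel_base_dbg part from the create info.
 */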
static struct intel_dev_dbg *dev_dbg_create(const XGL_DEVICE_CREATE_INFO *info)
{
    struct intel_dev_dbg *dbg;

    dbg = icd_alloc(sizeof(*dbg), 0, XGL_SYSTEM_ALLOC_DEBUG);
    if (!dbg)
        return NULL;

    memset(dbg, 0, sizeof(*dbg));

    if (!intel_base_dbg_init(&dbg->base, XGL_DBG_OBJECT_DEVICE,
                             info, sizeof(*info))) {
        icd_free(dbg);
        return NULL;
    }

    return dbg;
}
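
/* Free all message filters, clean up the common debug state, and free dbg. */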
static void dev_dbg_destroy(struct intel_dev_dbg *dbg)
{
    struct intel_dev_dbg_msg_filter *filter;

    filter = dbg->filters;
    while (filter) {
        struct intel_dev_dbg_msg_filter *next = filter->next;
        icd_free(filter);
        filter = next;
    }

    intel_base_dbg_cleanup(&dbg->base);
    icd_free(dbg);
}
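
/*
 * Create the queues requested in the create info.  Each request must name a
 * valid engine, ask for exactly one queue, and not target an engine that
 * already has a queue; otherwise everything created so far is destroyed and
 * an error is returned.
 */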
static XGL_RESULT dev_create_queues(struct intel_dev *dev,
                                    const XGL_DEVICE_QUEUE_CREATE_INFO *queues,
                                    XGL_UINT count)
{
    XGL_UINT i;

    if (!count)
        return XGL_ERROR_INVALID_POINTER;

    for (i = 0; i < count; i++) {
        const XGL_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
        XGL_RESULT ret = XGL_SUCCESS;

        if (q->queueNodeIndex < INTEL_GPU_ENGINE_COUNT &&
            q->queueCount == 1 && !dev->queues[q->queueNodeIndex]) {
            ret = intel_queue_create(dev, q->queueNodeIndex,
                                     &dev->queues[q->queueNodeIndex]);
        }
        else {
            ret = XGL_ERROR_INVALID_POINTER;
        }

        if (ret != XGL_SUCCESS) {
            XGL_UINT j;

            /*
             * dev->queues[] is indexed by engine, not by request, so walk
             * the whole array when rolling back and clear each slot so that
             * intel_dev_destroy() does not destroy the queues again.
             */
            for (j = 0; j < ARRAY_SIZE(dev->queues); j++) {
                if (dev->queues[j]) {
                    intel_queue_destroy(dev->queues[j]);
                    dev->queues[j] = NULL;
                }
            }

            return ret;
        }
    }

    return XGL_SUCCESS;
}
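
/*
 * Create a device for the given GPU: open the GPU, create a winsys for its
 * fd, create the requested queues, and install either the normal or the
 * debug (validation) dispatch table.
 */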
XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
                            const XGL_DEVICE_CREATE_INFO *info,
                            struct intel_dev **dev_ret)
{
    const struct icd_dispatch_table *dispatch;
    struct intel_dev_dbg *dbg;
    struct intel_dev *dev;
    XGL_RESULT ret;

    if (info->extensionCount)
        return XGL_ERROR_INVALID_EXTENSION;

    if (gpu->fd >= 0)
        return XGL_ERROR_DEVICE_ALREADY_CREATED;

    dev = icd_alloc(sizeof(*dev), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
    if (!dev)
        return XGL_ERROR_OUT_OF_MEMORY;

    memset(dev, 0, sizeof(*dev));
    dev->gpu = gpu;

    ret = intel_gpu_open(gpu);
    if (ret != XGL_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    dev->winsys = intel_winsys_create_for_fd(gpu->fd);
    if (!dev->winsys) {
        icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
                0, 0, "failed to create device winsys for %s", gpu->path);
        intel_dev_destroy(dev);
        return XGL_ERROR_UNKNOWN;
    }

    ret = dev_create_queues(dev, info->pRequestedQueues,
                            info->queueRecordCount);
    if (ret != XGL_SUCCESS) {
        intel_dev_destroy(dev);
        return ret;
    }

    if (info->flags & XGL_DEVICE_CREATE_VALIDATION_BIT) {
        dispatch = &intel_debug_dispatch_table;
        dbg = dev_dbg_create(info);

        if (!dbg) {
            icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0,
                    XGL_NULL_HANDLE, 0, 0,
                    "failed to create device debug layer for %s", gpu->path);
            intel_dev_destroy(dev);
            return XGL_ERROR_OUT_OF_MEMORY;
        }
    } else {
        dispatch = &intel_normal_dispatch_table;
        dbg = NULL;
    }

    dev->base.dispatch = dispatch;
    /* dbg is NULL when validation is disabled */
    dev->base.dbg = (dbg) ? &dbg->base : NULL;

    *dev_ret = dev;

    return XGL_SUCCESS;
}
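
/* Destroy the device and everything it owns, then close the GPU. */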
void intel_dev_destroy(struct intel_dev *dev)
{
    XGL_UINT i;

    if (dev->base.dbg)
        dev_dbg_destroy((struct intel_dev_dbg *) dev->base.dbg);

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i])
            intel_queue_destroy(dev->queues[i]);
    }

    if (dev->winsys)
        intel_winsys_destroy(dev->winsys);

    if (dev->gpu->fd >= 0)
        intel_gpu_close(dev->gpu);

    icd_free(dev);
}
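
/* Describe the single, CPU-visible memory heap exposed by the driver. */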
void intel_dev_get_heap_props(const struct intel_dev *dev,
                              XGL_MEMORY_HEAP_PROPERTIES *props)
{
    props->structSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);

    props->heapMemoryType = XGL_HEAP_MEMORY_LOCAL;

    props->heapSize = 0xffffffff; /* TODO system memory size */

    props->pageSize = 4096;
    props->flags = XGL_MEMORY_HEAP_CPU_VISIBLE_BIT |
                   XGL_MEMORY_HEAP_CPU_GPU_COHERENT_BIT |
                   XGL_MEMORY_HEAP_CPU_WRITE_COMBINED_BIT |
                   XGL_MEMORY_HEAP_HOLDS_PINNED_BIT |
                   XGL_MEMORY_HEAP_SHAREABLE_BIT;

    props->gpuReadPerfRating = 100.0f;
    props->gpuWritePerfRating = 100.0f;
    props->cpuReadPerfRating = 10.0f;
    props->cpuWritePerfRating = 80.0f;
}
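
/*
 * Install or update a debug message filter for msg_code.  An existing filter
 * for the same code is updated in place; otherwise a new one is prepended to
 * the filter list.
 */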
XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                    XGL_INT msg_code,
                                    XGL_DBG_MSG_FILTER filter)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *f = dbg->filters;

    assert(filter != XGL_DBG_MSG_FILTER_NONE);

    while (f) {
        if (f->msg_code == msg_code)
            break;
        f = f->next;
    }

    if (f) {
        if (f->filter != filter) {
            f->filter = filter;
            f->triggered = false;
        }
    } else {
        f = icd_alloc(sizeof(*f), 0, XGL_SYSTEM_ALLOC_DEBUG);
        if (!f)
            return XGL_ERROR_OUT_OF_MEMORY;

        f->msg_code = msg_code;
        f->filter = filter;
        f->triggered = false;

        f->next = dbg->filters;
        dbg->filters = f;
    }

    return XGL_SUCCESS;
}
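
/* Remove and free the message filter for msg_code, if one is installed. */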
void intel_dev_remove_msg_filter(struct intel_dev *dev,
                                 XGL_INT msg_code)
{
    struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
    struct intel_dev_dbg_msg_filter *f = dbg->filters, *prev = NULL;

    while (f) {
        if (f->msg_code == msg_code) {
            if (prev)
                prev->next = f->next;
            else
                dbg->filters = f->next;

            icd_free(f);
            break;
        }

        prev = f;
        f = f->next;
    }
}
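
/*
 * XGL entry points.  These unwrap the XGL handles and forward to the
 * intel_dev_* functions above.
 */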
XGL_RESULT XGLAPI intelCreateDevice(
    XGL_PHYSICAL_GPU                            gpu_,
    const XGL_DEVICE_CREATE_INFO*               pCreateInfo,
    XGL_DEVICE*                                 pDevice)
{
    struct intel_gpu *gpu = intel_gpu(gpu_);

    return intel_dev_create(gpu, pCreateInfo, (struct intel_dev **) pDevice);
}

XGL_RESULT XGLAPI intelDestroyDevice(
    XGL_DEVICE                                  device)
{
    struct intel_dev *dev = intel_dev(device);

    intel_dev_destroy(dev);

    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelGetMemoryHeapCount(
    XGL_DEVICE                                  device,
    XGL_UINT*                                   pCount)
{
    *pCount = 1;
    return XGL_SUCCESS;
}
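
/* Only a single heap is exposed, so heapId and infoType are not consulted. */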
XGL_RESULT XGLAPI intelGetMemoryHeapInfo(
    XGL_DEVICE                                  device,
    XGL_UINT                                    heapId,
    XGL_MEMORY_HEAP_INFO_TYPE                   infoType,
    XGL_SIZE*                                   pDataSize,
    XGL_VOID*                                   pData)
{
    struct intel_dev *dev = intel_dev(device);

    intel_dev_get_heap_props(dev, pData);
    *pDataSize = sizeof(XGL_MEMORY_HEAP_PROPERTIES);

    return XGL_SUCCESS;
}
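
/*
 * Graphics and compute queues both map to the 3D engine; DMA queues are
 * unavailable.
 */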
XGL_RESULT XGLAPI intelGetDeviceQueue(
    XGL_DEVICE                                  device,
    XGL_QUEUE_TYPE                              queueType,
    XGL_UINT                                    queueIndex,
    XGL_QUEUE*                                  pQueue)
{
    struct intel_dev *dev = intel_dev(device);

    switch (queueType) {
    case XGL_QUEUE_TYPE_GRAPHICS:
    case XGL_QUEUE_TYPE_COMPUTE:
        if (queueIndex > 0)
            return XGL_ERROR_UNAVAILABLE;
        *pQueue = dev->queues[INTEL_GPU_ENGINE_3D];
        return XGL_SUCCESS;
    case XGL_QUEUE_TYPE_DMA:
    default:
        return XGL_ERROR_UNAVAILABLE;
    }
}
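
/* Wait for all queues on the device to idle; return the last error seen. */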
XGL_RESULT XGLAPI intelDeviceWaitIdle(
    XGL_DEVICE                                  device)
{
    struct intel_dev *dev = intel_dev(device);
    XGL_RESULT ret = XGL_SUCCESS;
    XGL_UINT i;

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i]) {
            const XGL_RESULT r = intel_queue_wait(dev->queues[i], -1);
            if (r != XGL_SUCCESS)
                ret = r;
        }
    }

    return ret;
}