/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include "genhw/genhw.h"
#include "dispatch.h"
#include "queue.h"
#include "gpu.h"

static const char *gpu_get_name(const struct intel_gpu *gpu)
{
    const char *name = NULL;

    if (gen_is_hsw(gpu->devid)) {
        if (gen_is_desktop(gpu->devid))
            name = "Intel(R) Haswell Desktop";
        else if (gen_is_mobile(gpu->devid))
            name = "Intel(R) Haswell Mobile";
        else if (gen_is_server(gpu->devid))
            name = "Intel(R) Haswell Server";
    }
    else if (gen_is_ivb(gpu->devid)) {
        if (gen_is_desktop(gpu->devid))
            name = "Intel(R) Ivybridge Desktop";
        else if (gen_is_mobile(gpu->devid))
            name = "Intel(R) Ivybridge Mobile";
        else if (gen_is_server(gpu->devid))
            name = "Intel(R) Ivybridge Server";
    }
    else if (gen_is_snb(gpu->devid)) {
        if (gen_is_desktop(gpu->devid))
            name = "Intel(R) Sandybridge Desktop";
        else if (gen_is_mobile(gpu->devid))
            name = "Intel(R) Sandybridge Mobile";
        else if (gen_is_server(gpu->devid))
            name = "Intel(R) Sandybridge Server";
    }

    if (!name)
        name = "Unknown Intel Chipset";

    return name;
}

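/* open gpu->path lazily; the fd is cached in fd_internal until closed */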
static int gpu_open_internal(struct intel_gpu *gpu)
{
    if (gpu->fd_internal < 0) {
        gpu->fd_internal = open(gpu->path, O_RDWR);
        if (gpu->fd_internal < 0) {
            icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, NULL, 0,
                    0, "failed to open %s", gpu->path);
        }
    }

    return gpu->fd_internal;
}

static void gpu_close_internal(struct intel_gpu *gpu)
{
    if (gpu->fd_internal >= 0) {
        close(gpu->fd_internal);
        gpu->fd_internal = -1;
    }
}

static struct intel_gpu *gpu_create(int gen, int devid, const char *path)
{
    struct intel_gpu *gpu;
    size_t path_len;

    gpu = icd_alloc(sizeof(*gpu), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
    if (!gpu)
        return NULL;

    memset(gpu, 0, sizeof(*gpu));

    /* debug layer is always enabled for intel_gpu */
    gpu->dispatch = intel_dispatch_get(true);

    gpu->devid = devid;

    path_len = strlen(path);
    gpu->path = icd_alloc(path_len + 1, 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!gpu->path) {
        icd_free(gpu);
        return NULL;
    }
    memcpy(gpu->path, path, path_len + 1);

    gpu->gen_opaque = gen;

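    /* the GT level (e.g. GT1, GT2, GT3) is derived from the device id */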
    switch (intel_gpu_gen(gpu)) {
    case INTEL_GEN(7.5):
        gpu->gt = gen_get_hsw_gt(devid);
        break;
    case INTEL_GEN(7):
        gpu->gt = gen_get_ivb_gt(devid);
        break;
    case INTEL_GEN(6):
        gpu->gt = gen_get_snb_gt(devid);
        break;
    }

    /* 8192 dwords */
    gpu->max_batch_buffer_size = sizeof(uint32_t) * 8192;

    /* the winsys is prepared to handle one reloc for every two dwords, minus 2 */
    gpu->batch_buffer_reloc_count =
        gpu->max_batch_buffer_size / sizeof(uint32_t) / 2 - 2;
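    /* with an 8192-dword batch buffer, that is 8192 / 2 - 2 = 4094 relocs */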

    gpu->fd_internal = -1;
    gpu->fd = -1;

    return gpu;
}

static void gpu_destroy(struct intel_gpu *gpu)
{
    gpu_close_internal(gpu);
    icd_free(gpu->path);
    icd_free(gpu);
}

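/* all GPUs added by intel_gpu_add(), chained through their next pointers */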
static struct intel_gpu *intel_gpus;

/**
 * Return true if \p gpu is a valid intel_gpu.
 */
bool intel_gpu_is_valid(const struct intel_gpu *gpu)
{
    const struct intel_gpu *iter = intel_gpus;

    while (iter) {
        if (iter == gpu)
            return true;
        iter = iter->next;
    }

    return false;
}

static int devid_to_gen(int devid)
{
    int gen;

    if (gen_is_hsw(devid))
        gen = INTEL_GEN(7.5);
    else if (gen_is_ivb(devid))
        gen = INTEL_GEN(7);
    else if (gen_is_snb(devid))
        gen = INTEL_GEN(6);
    else
        gen = -1;

#ifdef INTEL_GEN_SPECIALIZED
    if (gen != INTEL_GEN(INTEL_GEN_SPECIALIZED))
        gen = -1;
#endif

    return gen;
}

XGL_RESULT intel_gpu_add(int devid, const char *path,
                         struct intel_gpu **gpu_ret)
{
    const int gen = devid_to_gen(devid);
    struct intel_gpu *gpu;

    if (gen < 0) {
        icd_log(XGL_DBG_MSG_WARNING, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
                0, 0, "unsupported device id 0x%04x", devid);
        return XGL_ERROR_INITIALIZATION_FAILED;
    }

    gpu = gpu_create(gen, devid, path);
    if (!gpu)
        return XGL_ERROR_OUT_OF_MEMORY;

    gpu->next = intel_gpus;
    intel_gpus = gpu;

    *gpu_ret = gpu;

    return XGL_SUCCESS;
}

void intel_gpu_remove_all(void)
{
    struct intel_gpu *gpu = intel_gpus;

    while (gpu) {
        struct intel_gpu *next = gpu->next;

        gpu_destroy(gpu);
        gpu = next;
    }

    intel_gpus = NULL;
}

struct intel_gpu *intel_gpu_get_list(void)
{
    return intel_gpus;
}

void intel_gpu_get_props(const struct intel_gpu *gpu,
                         XGL_PHYSICAL_GPU_PROPERTIES *props)
{
    const char *name;
    size_t name_len;

    props->structSize = sizeof(*props);

    props->apiVersion = INTEL_API_VERSION;
    props->driverVersion = INTEL_DRIVER_VERSION;

    props->vendorId = 0x8086;
    props->deviceId = gpu->devid;

    props->gpuType = XGL_GPU_TYPE_INTEGRATED;

    /* copy GPU name */
    name = gpu_get_name(gpu);
    name_len = strlen(name);
    if (name_len > sizeof(props->gpuName) - 1)
        name_len = sizeof(props->gpuName) - 1;
    memcpy(props->gpuName, name, name_len);
    props->gpuName[name_len] = '\0';

    props->maxMemRefsPerSubmission = gpu->batch_buffer_reloc_count;

    props->virtualMemPageSize = 4096;

    /* no inherent size limit, but no bound buffer can exceed 2GB anyway */
    props->maxInlineMemoryUpdateSize = 2u << 30;

    props->maxBoundDescriptorSets = 1;
    props->maxThreadGroupSize = 512;

    /* the timestamp counter increments every 80ns, i.e. 12.5 MHz */
    props->timestampFrequency = 1000 * 1000 * 1000 / 80;

    props->multiColorAttachmentClears = false;
}

void intel_gpu_get_perf(const struct intel_gpu *gpu,
                        XGL_PHYSICAL_GPU_PERFORMANCE *perf)
{
    /* TODO */
    perf->maxGpuClock = 1.0f;
    perf->aluPerClock = 1.0f;
    perf->texPerClock = 1.0f;
    perf->primsPerClock = 1.0f;
    perf->pixelsPerClock = 1.0f;
}

void intel_gpu_get_queue_props(const struct intel_gpu *gpu,
                               enum intel_gpu_engine_type engine,
                               XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *props)
{
    props->structSize = sizeof(*props);

    switch (engine) {
    case INTEL_GPU_ENGINE_3D:
        props->queueFlags = XGL_QUEUE_GRAPHICS_BIT | XGL_QUEUE_COMPUTE_BIT;
        props->queueCount = 1;
        props->maxAtomicCounters = INTEL_QUEUE_ATOMIC_COUNTER_COUNT;
        props->supportsTimestamps = true;
        break;
    default:
        assert(!"unknown engine type");
        return;
    }
}

void intel_gpu_get_memory_props(const struct intel_gpu *gpu,
                                XGL_PHYSICAL_GPU_MEMORY_PROPERTIES *props)
{
    props->structSize = sizeof(*props);

    props->supportsMigration = false;

    /* no kernel support yet */
    props->supportsVirtualMemoryRemapping = false;

    /* no winsys support for DRM_I915_GEM_USERPTR yet */
    props->supportsPinning = false;
}

XGL_RESULT intel_gpu_open(struct intel_gpu *gpu)
{
    gpu->fd = gpu_open_internal(gpu);

    return (gpu->fd >= 0) ? XGL_SUCCESS : XGL_ERROR_UNKNOWN;
}

void intel_gpu_close(struct intel_gpu *gpu)
{
    gpu->fd = -1;
    gpu_close_internal(gpu);
}

bool intel_gpu_has_extension(const struct intel_gpu *gpu, const char *ext)
{
    return false;
}

XGL_RESULT XGLAPI intelGetGpuInfo(
    XGL_PHYSICAL_GPU gpu_,
    XGL_PHYSICAL_GPU_INFO_TYPE infoType,
    XGL_SIZE* pDataSize,
    XGL_VOID* pData)
{
    const struct intel_gpu *gpu = intel_gpu(gpu_);
    XGL_RESULT ret = XGL_SUCCESS;

    switch (infoType) {
    case XGL_INFO_TYPE_PHYSICAL_GPU_PROPERTIES:
        if (pData == NULL) {
            return XGL_ERROR_INVALID_POINTER;
        }
        *pDataSize = sizeof(XGL_PHYSICAL_GPU_PROPERTIES);
        intel_gpu_get_props(gpu, pData);
        break;

    case XGL_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE:
        if (pData == NULL) {
            return XGL_ERROR_INVALID_POINTER;
        }
        *pDataSize = sizeof(XGL_PHYSICAL_GPU_PERFORMANCE);
        intel_gpu_get_perf(gpu, pData);
        break;

    case XGL_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES:
        /*
         * XGL Programmers guide, page 33:
         * to determine the data size an application calls
         * xglGetGpuInfo() with a NULL data pointer. The
         * expected data size for all queue property structures
         * is returned in pDataSize
         */
        *pDataSize = sizeof(XGL_PHYSICAL_GPU_QUEUE_PROPERTIES) *
            INTEL_GPU_ENGINE_COUNT;
        if (pData != NULL) {
            XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *dst = pData;
            int engine;

            for (engine = 0; engine < INTEL_GPU_ENGINE_COUNT; engine++) {
                intel_gpu_get_queue_props(gpu, engine, dst);
                dst++;
            }
        }
        break;

    case XGL_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES:
        if (pData == NULL) {
            return XGL_ERROR_INVALID_POINTER;
        }
        *pDataSize = sizeof(XGL_PHYSICAL_GPU_MEMORY_PROPERTIES);
        intel_gpu_get_memory_props(gpu, pData);
        break;

    default:
        ret = XGL_ERROR_INVALID_VALUE;
    }

    return ret;
}
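
/*
 * Illustrative sketch (not part of the original source): how an application
 * is expected to use the size query described in the comment above, via the
 * xglGetGpuInfo() entry point cited there.  malloc() is only a stand-in for
 * whatever allocator the application actually uses.
 *
 *     XGL_SIZE size;
 *     XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *queue_props;
 *
 *     // first call: NULL data pointer, only the required size is returned
 *     xglGetGpuInfo(gpu, XGL_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
 *                   &size, NULL);
 *
 *     // second call: a buffer of that size receives one structure per engine
 *     queue_props = malloc(size);
 *     xglGetGpuInfo(gpu, XGL_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
 *                   &size, queue_props);
 */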

XGL_RESULT XGLAPI intelGetExtensionSupport(
    XGL_PHYSICAL_GPU gpu_,
    const XGL_CHAR* pExtName)
{
    struct intel_gpu *gpu = intel_gpu(gpu_);

    return (intel_gpu_has_extension(gpu, (const char *) pExtName)) ?
        XGL_SUCCESS : XGL_ERROR_INVALID_EXTENSION;
}

XGL_RESULT XGLAPI intelGetMultiGpuCompatibility(
    XGL_PHYSICAL_GPU gpu0,
    XGL_PHYSICAL_GPU gpu1,
    XGL_GPU_COMPATIBILITY_INFO* pInfo)
{
    return XGL_ERROR_UNAVAILABLE;
}