/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include "genhw/genhw.h"
#include "dispatch_tables.h"
#include "gpu.h"

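/*
 * Return a human-readable name for the GPU, derived from its PCI device id.
 */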
static const char *gpu_get_name(const struct intel_gpu *gpu)
{
    const char *name = NULL;

    if (gen_is_hsw(gpu->devid)) {
        if (gen_is_desktop(gpu->devid))
            name = "Intel(R) Haswell Desktop";
        else if (gen_is_mobile(gpu->devid))
            name = "Intel(R) Haswell Mobile";
        else if (gen_is_server(gpu->devid))
            name = "Intel(R) Haswell Server";
    }
    else if (gen_is_ivb(gpu->devid)) {
        if (gen_is_desktop(gpu->devid))
            name = "Intel(R) Ivybridge Desktop";
        else if (gen_is_mobile(gpu->devid))
            name = "Intel(R) Ivybridge Mobile";
        else if (gen_is_server(gpu->devid))
            name = "Intel(R) Ivybridge Server";
    }
    else if (gen_is_snb(gpu->devid)) {
        if (gen_is_desktop(gpu->devid))
            name = "Intel(R) Sandybridge Desktop";
        else if (gen_is_mobile(gpu->devid))
            name = "Intel(R) Sandybridge Mobile";
        else if (gen_is_server(gpu->devid))
            name = "Intel(R) Sandybridge Server";
    }

    if (!name)
        name = "Unknown Intel Chipset";

    return name;
}

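/*
 * Open the DRM device node at gpu->path, caching the file descriptor in
 * fd_internal.  Returns the fd, or a negative value on failure.
 */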
static int gpu_open_internal(struct intel_gpu *gpu)
{
    if (gpu->fd_internal < 0) {
        gpu->fd_internal = open(gpu->path, O_RDWR);
        if (gpu->fd_internal < 0) {
            icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, NULL, 0,
                    0, "failed to open %s", gpu->path);
        }
    }

    return gpu->fd_internal;
}

static void gpu_close_internal(struct intel_gpu *gpu)
{
    if (gpu->fd_internal >= 0) {
        close(gpu->fd_internal);
        gpu->fd_internal = -1;
    }
}

static struct intel_gpu *gpu_create(int gen, int devid, const char *path)
{
    struct intel_gpu *gpu;
    size_t path_len;

    gpu = icd_alloc(sizeof(*gpu), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
    if (!gpu)
        return NULL;

    memset(gpu, 0, sizeof(*gpu));

    /* debug layer is always enabled for intel_gpu */
    gpu->dispatch = &intel_debug_dispatch_table;

    gpu->devid = devid;

    path_len = strlen(path);
    gpu->path = icd_alloc(path_len + 1, 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!gpu->path) {
        icd_free(gpu);
        return NULL;
    }
    memcpy(gpu->path, path, path_len + 1);

    gpu->gen_opaque = gen;

    /* 8192 dwords */
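    /* i.e. sizeof(uint32_t) * 8192 = 32 KiB per batch buffer */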
    gpu->batch_buffer_size = sizeof(uint32_t) * 8192;

    gpu->fd_internal = -1;
    gpu->fd = -1;

    return gpu;
}

static void gpu_destroy(struct intel_gpu *gpu)
{
    gpu_close_internal(gpu);
    icd_free(gpu->path);
    icd_free(gpu);
}

static struct intel_gpu *intel_gpus;

/**
 * Return true if \p gpu is a valid intel_gpu.
 */
bool intel_gpu_is_valid(const struct intel_gpu *gpu)
{
    const struct intel_gpu *iter = intel_gpus;

    while (iter) {
        if (iter == gpu)
            return true;
        iter = iter->next;
    }

    return false;
}

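/*
 * Map a PCI device id to its hardware generation, expressed with INTEL_GEN().
 * Returns -1 for devices this driver does not support.
 */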
static int devid_to_gen(int devid)
{
    int gen;

    if (gen_is_hsw(devid))
        gen = INTEL_GEN(7.5);
    else if (gen_is_ivb(devid))
        gen = INTEL_GEN(7);
    else if (gen_is_snb(devid))
        gen = INTEL_GEN(6);
    else
        gen = -1;

    return gen;
}

XGL_RESULT intel_gpu_add(int devid, const char *path,
                         struct intel_gpu **gpu_ret)
{
    const int gen = devid_to_gen(devid);
    struct intel_gpu *gpu;

    if (gen < 0) {
        icd_log(XGL_DBG_MSG_WARNING, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
                0, 0, "unsupported device id 0x%04x", devid);
        return XGL_ERROR_INITIALIZATION_FAILED;
    }

    gpu = gpu_create(gen, devid, path);
    if (!gpu)
        return XGL_ERROR_OUT_OF_MEMORY;

    gpu->next = intel_gpus;
    intel_gpus = gpu;

    *gpu_ret = gpu;

    return XGL_SUCCESS;
}

void intel_gpu_remove_all(void)
{
    struct intel_gpu *gpu = intel_gpus;

    while (gpu) {
        struct intel_gpu *next = gpu->next;

        gpu_destroy(gpu);
        gpu = next;
    }

    intel_gpus = NULL;
}

struct intel_gpu *intel_gpu_get_list(void)
{
    return intel_gpus;
}
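
/*
 * Illustrative sketch only (kept out of the build with #if 0): one way a
 * caller could drive the list API above.  The function name, the device id
 * (0x0412, a Haswell desktop part) and the render-node path are placeholder
 * assumptions, not values this driver hard-codes.
 */
#if 0
static void example_enumerate(void)
{
    struct intel_gpu *gpu;

    if (intel_gpu_add(0x0412, "/dev/dri/renderD128", &gpu) == XGL_SUCCESS) {
        XGL_PHYSICAL_GPU_PROPERTIES props;

        intel_gpu_get_props(gpu, &props);
        printf("found %s\n", props.gpuName);
    }

    intel_gpu_remove_all();
}
#endif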

void intel_gpu_get_props(const struct intel_gpu *gpu,
                         XGL_PHYSICAL_GPU_PROPERTIES *props)
{
    const char *name;
    size_t name_len;

    props->structSize = sizeof(*props);

    props->apiVersion = INTEL_API_VERSION;
    props->driverVersion = INTEL_DRIVER_VERSION;

    props->vendorId = 0x8086;
    props->deviceId = gpu->devid;

    props->gpuType = XGL_GPU_TYPE_INTEGRATED;

    /* copy GPU name */
    name = gpu_get_name(gpu);
    name_len = strlen(name);
    if (name_len > sizeof(props->gpuName) - 1)
        name_len = sizeof(props->gpuName) - 1;
    memcpy(props->gpuName, name, name_len);
    props->gpuName[name_len] = '\0';

    /* the winsys is prepared for one reloc every two dwords, then minus 2 */
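    /* with the 8192-dword batch buffer this works out to 8192 / 2 - 2 = 4094 */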
    props->maxMemRefsPerSubmission =
        gpu->batch_buffer_size / sizeof(uint32_t) / 2 - 2;

    props->virtualMemPageSize = 4096;

    /* no size limit, but no bounded buffer could exceed 2GB */
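    /* 2u << 30 bytes == 2 GiB, matching that cap */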
    props->maxInlineMemoryUpdateSize = 2u << 30;

    props->maxBoundDescriptorSets = 1;
    props->maxThreadGroupSize = 512;

    /* incremented every 80ns */
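    /* 1,000,000,000 ns / 80 ns per tick = 12,500,000 Hz (12.5 MHz) */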
    props->timestampFrequency = 1000 * 1000 * 1000 / 80;

    props->multiColorAttachmentClears = false;
}

void intel_gpu_get_perf(const struct intel_gpu *gpu,
                        XGL_PHYSICAL_GPU_PERFORMANCE *perf)
{
    /* TODO */
    perf->maxGpuClock = 1.0f;
    perf->aluPerClock = 1.0f;
    perf->texPerClock = 1.0f;
    perf->primsPerClock = 1.0f;
    perf->pixelsPerClock = 1.0f;
}

void intel_gpu_get_queue_props(const struct intel_gpu *gpu,
                               enum intel_gpu_engine_type engine,
                               XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *props)
{
    props->structSize = sizeof(*props);

    switch (engine) {
    case INTEL_GPU_ENGINE_3D:
        props->queueFlags = XGL_QUEUE_GRAPHICS_BIT | XGL_QUEUE_COMPUTE_BIT;
        props->queueCount = 1;
        props->maxAtomicCounters = 4096;
        props->supportsTimestamps = true;
        break;
    default:
        assert(!"unknown engine type");
        return;
    }
}

void intel_gpu_get_memory_props(const struct intel_gpu *gpu,
                                XGL_PHYSICAL_GPU_MEMORY_PROPERTIES *props)
{
    props->structSize = sizeof(*props);

    props->supportsMigration = false;

    /* no kernel support yet */
    props->supportsVirtualMemoryRemapping = false;

    props->supportsPinning = true;
}

XGL_RESULT intel_gpu_open(struct intel_gpu *gpu)
{
    gpu->fd = gpu_open_internal(gpu);

    return (gpu->fd >= 0) ? XGL_SUCCESS : XGL_ERROR_UNKNOWN;
}

void intel_gpu_close(struct intel_gpu *gpu)
{
    gpu->fd = -1;
    gpu_close_internal(gpu);
}

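/*
 * No extensions are advertised yet; every query returns false.
 */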
bool intel_gpu_has_extension(const struct intel_gpu *gpu, const char *ext)
{
    return false;
}