/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "genhw/genhw.h"
#include "kmd/winsys.h"
#include "cmd.h"
#include "dev.h"
#include "fence.h"
#include "queue.h"

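/*
 * Submit a BO to the queue's ring via the winsys.  When INTEL_DEBUG_BATCH
 * is set, the batch is decoded first; when INTEL_DEBUG_NOHW is set, the
 * real submission is skipped.
 */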
static XGL_RESULT queue_submit_bo(struct intel_queue *queue,
                                  struct intel_bo *bo,
                                  XGL_GPU_SIZE used)
{
    struct intel_winsys *winsys = queue->dev->winsys;
    int err;

    if (intel_debug & INTEL_DEBUG_BATCH)
        intel_winsys_decode_bo(winsys, bo, used);

    if (intel_debug & INTEL_DEBUG_NOHW)
        err = 0;
    else
        err = intel_winsys_submit_bo(winsys, queue->ring, bo, used, 0);

    return (err) ? XGL_ERROR_UNKNOWN : XGL_SUCCESS;
}

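/*
 * Allocate a CPU-domain BO of the given size.  When cmd_len is non-zero,
 * the first cmd_len bytes of cmd are copied into the BO before it is
 * returned.
 */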
static struct intel_bo *queue_create_bo(struct intel_queue *queue,
                                        XGL_GPU_SIZE size,
                                        const void *cmd,
                                        XGL_SIZE cmd_len)
{
    struct intel_bo *bo;
    void *ptr;

    bo = intel_winsys_alloc_buffer(queue->dev->winsys,
            "queue bo", size, INTEL_DOMAIN_CPU);
    if (!bo)
        return NULL;

    if (!cmd_len)
        return bo;

    ptr = intel_bo_map(bo, true);
    if (!ptr) {
        intel_bo_unreference(bo);
        return NULL;
    }

    memcpy(ptr, cmd, cmd_len);
    intel_bo_unmap(bo);

    return bo;
}

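/*
 * Make sure the render ring has the requested pipeline (3D or media)
 * selected.  The small PIPELINE_SELECT batches are created lazily and
 * cached in the queue so that switching back and forth does not
 * reallocate them.
 */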
static XGL_RESULT queue_select_pipeline(struct intel_queue *queue,
                                        int pipeline_select)
{
    uint32_t pipeline_select_cmd[] = {
        GEN_RENDER_CMD(SINGLE_DW, GEN6, PIPELINE_SELECT),
        GEN_MI_CMD(MI_BATCH_BUFFER_END),
    };
    struct intel_bo *bo;
    XGL_RESULT ret;

    if (queue->ring != INTEL_RING_RENDER ||
        queue->last_pipeline_select == pipeline_select)
        return XGL_SUCCESS;

    switch (pipeline_select) {
    case GEN6_PIPELINE_SELECT_DW0_SELECT_3D:
        bo = queue->select_graphics_bo;
        break;
    case GEN6_PIPELINE_SELECT_DW0_SELECT_MEDIA:
        bo = queue->select_compute_bo;
        break;
    default:
        return XGL_ERROR_INVALID_VALUE;
        break;
    }

    if (!bo) {
        pipeline_select_cmd[0] |= pipeline_select;
        bo = queue_create_bo(queue, sizeof(pipeline_select_cmd),
                pipeline_select_cmd, sizeof(pipeline_select_cmd));
        if (!bo)
            return XGL_ERROR_OUT_OF_GPU_MEMORY;

        switch (pipeline_select) {
        case GEN6_PIPELINE_SELECT_DW0_SELECT_3D:
            queue->select_graphics_bo = bo;
            break;
        case GEN6_PIPELINE_SELECT_DW0_SELECT_MEDIA:
            queue->select_compute_bo = bo;
            break;
        default:
            break;
        }
    }

    ret = queue_submit_bo(queue, bo, sizeof(pipeline_select_cmd));
    if (ret == XGL_SUCCESS)
        queue->last_pipeline_select = pipeline_select;

    return ret;
}

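/*
 * Initialize the hardware context of the queue's ring and allocate the BO
 * that backs the queue's atomic counters.  Non-render rings only need the
 * atomic BO; the render ring additionally submits a small init batch
 * (STATE_SIP, PIPELINE_SELECT 3D, 3DSTATE_VF_STATISTICS) whose BO is then
 * reused as the atomic BO.
 */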
static XGL_RESULT queue_init_hw_and_atomic_bo(struct intel_queue *queue)
{
    const uint32_t ctx_init_cmd[] = {
        /* STATE_SIP */
        GEN_RENDER_CMD(COMMON, GEN6, STATE_SIP),
        0,
        /* PIPELINE_SELECT */
        GEN_RENDER_CMD(SINGLE_DW, GEN6, PIPELINE_SELECT) |
            GEN6_PIPELINE_SELECT_DW0_SELECT_3D,
        /* 3DSTATE_VF_STATISTICS */
        GEN_RENDER_CMD(SINGLE_DW, GEN6, 3DSTATE_VF_STATISTICS),
        /* end */
        GEN_MI_CMD(MI_BATCH_BUFFER_END),
        GEN_MI_CMD(MI_NOOP),
    };
    struct intel_bo *bo;
    XGL_RESULT ret;

    if (queue->ring != INTEL_RING_RENDER) {
        queue->last_pipeline_select = -1;
        queue->atomic_bo = queue_create_bo(queue,
                sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
                NULL, 0);
        return (queue->atomic_bo) ? XGL_SUCCESS : XGL_ERROR_OUT_OF_GPU_MEMORY;
    }

    bo = queue_create_bo(queue,
            sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
            ctx_init_cmd, sizeof(ctx_init_cmd));
    if (!bo)
        return XGL_ERROR_OUT_OF_GPU_MEMORY;

    ret = queue_submit_bo(queue, bo, sizeof(ctx_init_cmd));
    if (ret != XGL_SUCCESS) {
        intel_bo_unreference(bo);
        return ret;
    }

    queue->last_pipeline_select = GEN6_PIPELINE_SELECT_DW0_SELECT_3D;
    /* reuse the init batch BO as the atomic counter BO */
    queue->atomic_bo = bo;

    return XGL_SUCCESS;
}

XGL_RESULT intel_queue_create(struct intel_dev *dev,
                              enum intel_gpu_engine_type engine,
                              struct intel_queue **queue_ret)
{
    struct intel_queue *queue;
    enum intel_ring_type ring;

    switch (engine) {
    case INTEL_GPU_ENGINE_3D:
        ring = INTEL_RING_RENDER;
        break;
    default:
        return XGL_ERROR_INVALID_VALUE;
        break;
    }

    queue = (struct intel_queue *) intel_base_create(dev, sizeof(*queue),
            dev->base.dbg, XGL_DBG_OBJECT_QUEUE, NULL, 0);
    if (!queue)
        return XGL_ERROR_OUT_OF_MEMORY;

    queue->dev = dev;
    queue->ring = ring;

    if (queue_init_hw_and_atomic_bo(queue) != XGL_SUCCESS) {
        intel_queue_destroy(queue);
        return XGL_ERROR_INITIALIZATION_FAILED;
    }

    *queue_ret = queue;

    return XGL_SUCCESS;
}

void intel_queue_destroy(struct intel_queue *queue)
{
    if (queue->atomic_bo)
        intel_bo_unreference(queue->atomic_bo);
    if (queue->select_graphics_bo)
        intel_bo_unreference(queue->select_graphics_bo);
    if (queue->select_compute_bo)
        intel_bo_unreference(queue->select_compute_bo);
    intel_base_destroy(&queue->base);
}

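/*
 * Wait for the queue to idle by waiting on the batch BO of the last
 * submitted command buffer, if any.
 */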
XGL_RESULT intel_queue_wait(struct intel_queue *queue, int64_t timeout)
{
    struct intel_bo *bo = (queue->last_submitted_cmd) ?
        intel_cmd_get_batch(queue->last_submitted_cmd, NULL) : NULL;

    return (!bo || intel_bo_wait(bo, timeout) == 0) ?
        XGL_SUCCESS : XGL_ERROR_UNKNOWN;
}

XGL_RESULT XGLAPI intelQueueSetGlobalMemReferences(
    XGL_QUEUE                                   queue,
    XGL_UINT                                    memRefCount,
    const XGL_MEMORY_REF*                       pMemRefs)
{
    /*
     * The winsys maintains the list of memory references.  These are
     * ignored until we move away from the winsys.
     */
    return XGL_SUCCESS;
}

XGL_RESULT XGLAPI intelQueueWaitIdle(
    XGL_QUEUE                                   queue_)
{
    struct intel_queue *queue = intel_queue(queue_);

    return intel_queue_wait(queue, -1);
}

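/*
 * Submit the command buffers in order: switch the pipeline if a command
 * buffer requires it, then submit its batch BO and remember it as the last
 * submitted command buffer.  On success, the fence (if any) is attached to
 * the last submitted command buffer.
 */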
XGL_RESULT XGLAPI intelQueueSubmit(
    XGL_QUEUE                                   queue_,
    XGL_UINT                                    cmdBufferCount,
    const XGL_CMD_BUFFER*                       pCmdBuffers,
    XGL_UINT                                    memRefCount,
    const XGL_MEMORY_REF*                       pMemRefs,
    XGL_FENCE                                   fence_)
{
    struct intel_queue *queue = intel_queue(queue_);
    XGL_RESULT ret = XGL_SUCCESS;
    XGL_UINT i;

    for (i = 0; i < cmdBufferCount; i++) {
        struct intel_cmd *cmd = intel_cmd(pCmdBuffers[i]);
        struct intel_bo *bo;
        XGL_GPU_SIZE used;

        ret = queue_select_pipeline(queue, cmd->pipeline_select);
        if (ret != XGL_SUCCESS)
            break;

        bo = intel_cmd_get_batch(cmd, &used);
        ret = queue_submit_bo(queue, bo, used);
        queue->last_submitted_cmd = cmd;

        if (ret != XGL_SUCCESS)
            break;
    }

    if (ret == XGL_SUCCESS && fence_ != XGL_NULL_HANDLE) {
        struct intel_fence *fence = intel_fence(fence_);
        intel_fence_set_cmd(fence, queue->last_submitted_cmd);
    }

    /* XGL_MEMORY_REFs are ignored as the winsys already knows them */

    return ret;
}

XGL_RESULT XGLAPI intelOpenSharedQueueSemaphore(
    XGL_DEVICE                                  device,
    const XGL_QUEUE_SEMAPHORE_OPEN_INFO*        pOpenInfo,
    XGL_QUEUE_SEMAPHORE*                        pSemaphore)
{
    return XGL_ERROR_UNAVAILABLE;
}

XGL_RESULT XGLAPI intelCreateQueueSemaphore(
    XGL_DEVICE                                  device,
    const XGL_QUEUE_SEMAPHORE_CREATE_INFO*      pCreateInfo,
    XGL_QUEUE_SEMAPHORE*                        pSemaphore)
{
    /*
     * We want to find an unused semaphore register and initialize it.
     * Signal will increment the register.  Wait will atomically decrement
     * it and block if the value is zero, or a large constant N if we do
     * not want to go negative.
     *
     * XXX However, MI_SEMAPHORE_MBOX does not seem to have the flexibility.
     */
    return XGL_ERROR_UNAVAILABLE;
}

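/*
 * Illustrative sketch of the intended semantics described in the comment
 * above, using a hypothetical software counter in place of a hardware
 * semaphore register (the names below are made up for illustration only):
 *
 *     signal:  counter++;
 *     wait:    while (counter == 0)
 *                  block();
 *              counter--;
 *
 * On hardware, the wait would have to be a single atomic decrement-and-block
 * operation, which MI_SEMAPHORE_MBOX does not appear to provide.
 */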
XGL_RESULT XGLAPI intelSignalQueueSemaphore(
    XGL_QUEUE                                   queue,
    XGL_QUEUE_SEMAPHORE                         semaphore)
{
    return XGL_ERROR_UNAVAILABLE;
}

XGL_RESULT XGLAPI intelWaitQueueSemaphore(
    XGL_QUEUE                                   queue,
    XGL_QUEUE_SEMAPHORE                         semaphore)
{
    return XGL_ERROR_UNAVAILABLE;
}