/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 */

#ifndef DEV_H
#define DEV_H

#include "intel.h"
#include "gpu.h"
#include "obj.h"

struct intel_pipeline_shader;
struct intel_queue;
struct intel_winsys;

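/*
 * Meta shaders are small built-in pipeline shaders the driver uses
 * internally for operations such as buffer fills and copies, image copies,
 * clears, and multisample resolves.  Each entry below documents the push
 * constants its shader expects; a short usage sketch follows the enum.
 */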
enum intel_dev_meta_shader {
    /*
     * This expects an ivec2 to be pushed:
     *
     *  .x is memory offset
     *  .y is fill value
     *
     * as well as GEN6_VFCOMP_STORE_VID.
     */
    INTEL_DEV_META_VS_FILL_MEM,

    /*
     * These expect an ivec2 to be pushed:
     *
     *  .x is dst memory offset
     *  .y is src memory offset
     *
     * as well as GEN6_VFCOMP_STORE_VID.
     */
    INTEL_DEV_META_VS_COPY_MEM,
    INTEL_DEV_META_VS_COPY_MEM_UNALIGNED,

    /*
     * These expect an ivec4 to be pushed:
     *
     *  .xy is added to fragment coord to form (u, v)
     *  .z is ai
     *  .w is lod
     */
    INTEL_DEV_META_FS_COPY_MEM,             /* ld_lz(u) */
    INTEL_DEV_META_FS_COPY_1D,              /* ld(u, lod) */
    INTEL_DEV_META_FS_COPY_1D_ARRAY,        /* ld(u, lod, ai) */
    INTEL_DEV_META_FS_COPY_2D,              /* ld(u, lod, v) */
    INTEL_DEV_META_FS_COPY_2D_ARRAY,        /* ld(u, lod, v, ai) */
    INTEL_DEV_META_FS_COPY_2D_MS,           /* ld_mcs() + ld2dms() */

    /*
     * These expect a second ivec4 to be pushed:
     *
     *  .x is memory offset
     *  .y is extent width
     *
     * The second ivec4 is used to convert the linear fragment coord to (u, v).
     */
    INTEL_DEV_META_FS_COPY_1D_TO_MEM,       /* ld(u, lod) */
    INTEL_DEV_META_FS_COPY_1D_ARRAY_TO_MEM, /* ld(u, lod, ai) */
    INTEL_DEV_META_FS_COPY_2D_TO_MEM,       /* ld(u, lod, v) */
    INTEL_DEV_META_FS_COPY_2D_ARRAY_TO_MEM, /* ld(u, lod, v, ai) */
    INTEL_DEV_META_FS_COPY_2D_MS_TO_MEM,    /* ld_mcs() + ld2dms() */

    /*
     * This expects an ivec4 to be pushed:
     *
     *  .xy is added to fragment coord to form (u, v)
     *  .z is extent width
     *
     * .z is used to linearize (u, v).
     */
    INTEL_DEV_META_FS_COPY_MEM_TO_IMG,      /* ld_lz(u) */

    /*
     * These expect the clear value to be pushed, and set fragment color or
     * depth to the clear value.
     */
    INTEL_DEV_META_FS_CLEAR_COLOR,
    INTEL_DEV_META_FS_CLEAR_DEPTH,

    /*
     * These expect an ivec4 to be pushed:
     *
     *  .xy is added to fragment coord to form (u, v)
     *
     * All samples are fetched and averaged. The fragment color is set to the
     * averaged value.
     */
    INTEL_DEV_META_FS_RESOLVE_2X,
    INTEL_DEV_META_FS_RESOLVE_4X,
    INTEL_DEV_META_FS_RESOLVE_8X,
    INTEL_DEV_META_FS_RESOLVE_16X,

    INTEL_DEV_META_SHADER_COUNT,
};

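/*
 * Usage sketch (illustrative; not part of the driver interface itself):
 * callers look a meta shader up through intel_dev_get_meta_shader(),
 * declared later in this header, and supply the push constants described
 * in the per-entry comments above.  For example, a buffer fill would start
 * with
 *
 *   const struct intel_pipeline_shader *sh =
 *       intel_dev_get_meta_shader(dev, INTEL_DEV_META_VS_FILL_MEM);
 *
 * and then push an ivec2 whose .x is the destination memory offset and
 * whose .y is the fill value.  The binding and push-constant plumbing is
 * presumably handled by the command buffer code, outside this header.
 */
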
struct intel_dev_dbg_msg_filter {
    XGL_INT msg_code;
    XGL_DBG_MSG_FILTER filter;
    bool triggered;

    struct intel_dev_dbg_msg_filter *next;
};

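/*
 * Per-device debug state.  Judging by the names, validation_level and the
 * boolean options are set through intelDbgSetValidationLevel() and
 * intelDbgSetDeviceOption() declared at the end of this header, while the
 * filter list is maintained by intel_dev_add_msg_filter() and
 * intel_dev_remove_msg_filter().
 */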
struct intel_dev_dbg {
    struct intel_base_dbg base;

    XGL_VALIDATION_LEVEL validation_level;
    bool disable_pipeline_loads;
    bool force_object_memory_reqs;
    bool force_large_image_alignment;

    struct intel_dev_dbg_msg_filter *filters;
};

struct intel_dev {
    struct intel_base base;

    bool exts[INTEL_EXT_COUNT];

    struct intel_gpu *gpu;
    struct intel_winsys *winsys;

    struct intel_bo *cmd_scratch_bo;
    struct intel_pipeline_shader *cmd_meta_shaders[INTEL_DEV_META_SHADER_COUNT];

    struct intel_queue *queues[INTEL_GPU_ENGINE_COUNT];
};

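/*
 * XGL_DEVICE handles created by this driver are simply pointers to
 * struct intel_dev, so converting between the two is a plain cast.
 */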
static inline struct intel_dev *intel_dev(XGL_DEVICE dev)
{
    return (struct intel_dev *) dev;
}

static inline struct intel_dev_dbg *intel_dev_dbg(struct intel_dev *dev)
{
    return (struct intel_dev_dbg *) dev->base.dbg;
}

XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
                            const XGL_DEVICE_CREATE_INFO *info,
                            struct intel_dev **dev_ret);
void intel_dev_destroy(struct intel_dev *dev);

void intel_dev_get_heap_props(const struct intel_dev *dev,
                              XGL_MEMORY_HEAP_PROPERTIES *props);

XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                    XGL_INT msg_code,
                                    XGL_DBG_MSG_FILTER filter);

void intel_dev_remove_msg_filter(struct intel_dev *dev,
                                 XGL_INT msg_code);

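/*
 * Format and emit a debug message on behalf of dev (or src_object, when
 * given).  This is presumably where the filters installed with
 * intel_dev_add_msg_filter() are consulted before the message is passed on.
 */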
void intel_dev_log(struct intel_dev *dev,
                   XGL_DBG_MSG_TYPE msg_type,
                   XGL_VALIDATION_LEVEL validation_level,
                   struct intel_base *src_object,
                   XGL_SIZE location,
                   XGL_INT msg_code,
                   const char *format, ...);

static inline const struct intel_pipeline_shader *intel_dev_get_meta_shader(const struct intel_dev *dev,
                                                                            enum intel_dev_meta_shader id)
{
    assert(id < INTEL_DEV_META_SHADER_COUNT);
    return dev->cmd_meta_shaders[id];
}

XGL_RESULT XGLAPI intelCreateDevice(
    XGL_PHYSICAL_GPU gpu,
    const XGL_DEVICE_CREATE_INFO* pCreateInfo,
    XGL_DEVICE* pDevice);

XGL_RESULT XGLAPI intelDestroyDevice(
    XGL_DEVICE device);

XGL_RESULT XGLAPI intelGetMemoryHeapCount(
    XGL_DEVICE device,
    XGL_UINT* pCount);

XGL_RESULT XGLAPI intelGetMemoryHeapInfo(
    XGL_DEVICE device,
    XGL_UINT heapId,
    XGL_MEMORY_HEAP_INFO_TYPE infoType,
    XGL_SIZE* pDataSize,
    XGL_VOID* pData);

XGL_RESULT XGLAPI intelGetDeviceQueue(
    XGL_DEVICE device,
    XGL_QUEUE_TYPE queueType,
    XGL_UINT queueIndex,
    XGL_QUEUE* pQueue);

XGL_RESULT XGLAPI intelDeviceWaitIdle(
    XGL_DEVICE device);

XGL_RESULT XGLAPI intelDbgSetValidationLevel(
    XGL_DEVICE device,
    XGL_VALIDATION_LEVEL validationLevel);

XGL_RESULT XGLAPI intelDbgSetMessageFilter(
    XGL_DEVICE device,
    XGL_INT msgCode,
    XGL_DBG_MSG_FILTER filter);

XGL_RESULT XGLAPI intelDbgSetDeviceOption(
    XGL_DEVICE device,
    XGL_DBG_DEVICE_OPTION dbgOption,
    XGL_SIZE dataSize,
    const XGL_VOID* pData);

#endif /* DEV_H */