Starting to build the framework.
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..5ea79bd
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,25 @@
+# The name of our project is "XGL". CMakeLists files in this project can
+# refer to the root source directory of the project as ${XGL_SOURCE_DIR} and
+# to the root binary directory of the project as ${XGL_BINARY_DIR}.
+cmake_minimum_required (VERSION 2.6)
+project (XGL)
+set (XGL_VERSION_MAJOR 0)
+set (XGL_VERSION_MINOR 1)
+
+# Header file for CMake settings
+configure_file (
+ "${PROJECT_SOURCE_DIR}/include/XGLConfig.h.in"
+ "${PROJECT_BINARY_DIR}/XGLConfig.h"
+ )
+
+include_directories("${PROJECT_BINARY_DIR}")
+
+# xgl global includes
+include_directories("${PROJECT_SOURCE_DIR}/include")
+
+# main: Device independent (DI) XGL library
+# drivers: Device dependent (DD) XGL components
+# tests: XGL tests
+add_subdirectory (main)
+add_subdirectory (drivers)
+add_subdirectory (tests)
diff --git a/docs/Explicit GL Programming Guide and API Reference.pdf b/docs/Explicit GL Programming Guide and API Reference.pdf
new file mode 100644
index 0000000..bc9533d
--- /dev/null
+++ b/docs/Explicit GL Programming Guide and API Reference.pdf
Binary files differ
diff --git a/include/XGLConfig.h.in b/include/XGLConfig.h.in
new file mode 100644
index 0000000..7492690
--- /dev/null
+++ b/include/XGLConfig.h.in
@@ -0,0 +1,3 @@
+// the CMAKE options and settings
+#define XGL_VERSION_MAJOR @XGL_VERSION_MAJOR@
+#define XGL_VERSION_MINOR @XGL_VERSION_MINOR@
diff --git a/include/mantle.h b/include/mantle.h
new file mode 100644
index 0000000..3f20dba
--- /dev/null
+++ b/include/mantle.h
@@ -0,0 +1,2033 @@
+//
+// File: mantle.h
+//
+// Copyright 2014 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+//
+// AMD is granting you permission to use this software for reference
+// purposes only and not for use in any software product.
+//
+// You agree that you will not reverse engineer or decompile the Materials,
+// in whole or in part, except as allowed by applicable law.
+//
+// WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY,
+// INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE
+// WILL RUN UNINTERRUPTED OR ERROR-FREE OR WARRANTIES ARISING FROM CUSTOM OF
+// TRADE OR COURSE OF USAGE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF THE
+// SOFTWARE IS ASSUMED BY YOU.
+// Some jurisdictions do not allow the exclusion of implied warranties, so
+// the above exclusion may not apply to You.
+//
+// LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL
+// NOT, UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT,
+// INCIDENTAL, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF
+// THE SOFTWARE OR THIS AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+// In no event shall AMD's total liability to You for all damages, losses,
+// and causes of action (whether in contract, tort (including negligence) or
+// otherwise) exceed the amount of $100 USD. You agree to defend, indemnify
+// and hold harmless AMD and its licensors, and any of their directors,
+// officers, employees, affiliates or agents from and against any and all
+// loss, damage, liability and other expenses (including reasonable attorneys'
+// fees), resulting from Your use of the Software or violation of the terms and
+// conditions of this Agreement.
+//
+// U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
+// RIGHTS." Use, duplication, or disclosure by the Government is subject to the
+// restrictions as set forth in FAR 52.227-14 and DFAR 252.227-7013, et seq., or
+// its successor. Use of the Materials by the Government constitutes
+// acknowledgement of AMD's proprietary rights in them.
+//
+// EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as
+// stated in the Software License Agreement.
+//
+
+#ifndef __MANTLE_H__
+#define __MANTLE_H__
+
+#define GR_MAKE_VERSION(major, minor, patch) \
+ ((major << 22) | (minor << 12) | patch)
+
+#include "mantlePlatform.h"
+
+// Mantle API version supported by this file
+#define GR_API_VERSION GR_MAKE_VERSION(0, 21, 0)
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+* Core Mantle API
+***************************************************************************************************
+*/
+
+#ifdef __cplusplus
+ #define GR_DEFINE_HANDLE(_obj) struct _obj##_T {}; typedef _obj##_T* _obj;
+ #define GR_DEFINE_SUBCLASS_HANDLE(_obj, _base) struct _obj##_T : public _base##_T {}; typedef _obj##_T* _obj;
+#else // __cplusplus
+ #define GR_DEFINE_HANDLE(_obj) typedef void* _obj;
+ #define GR_DEFINE_SUBCLASS_HANDLE(_obj, _base) typedef void* _obj;
+#endif // __cplusplus
+
+GR_DEFINE_HANDLE(GR_PHYSICAL_GPU)
+GR_DEFINE_HANDLE(GR_BASE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_DEVICE, GR_BASE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_QUEUE, GR_BASE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_GPU_MEMORY, GR_BASE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_OBJECT, GR_BASE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_IMAGE, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_IMAGE_VIEW, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_COLOR_TARGET_VIEW, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_DEPTH_STENCIL_VIEW, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_SHADER, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_PIPELINE, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_SAMPLER, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_DESCRIPTOR_SET, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_STATE_OBJECT, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_VIEWPORT_STATE_OBJECT, GR_STATE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_RASTER_STATE_OBJECT, GR_STATE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_MSAA_STATE_OBJECT, GR_STATE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_COLOR_BLEND_STATE_OBJECT, GR_STATE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_DEPTH_STENCIL_STATE_OBJECT, GR_STATE_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_CMD_BUFFER, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_FENCE, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_QUEUE_SEMAPHORE, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_EVENT, GR_OBJECT)
+GR_DEFINE_SUBCLASS_HANDLE(GR_QUERY_POOL, GR_OBJECT)
+
+#define GR_MAX_PHYSICAL_GPUS 16
+#define GR_MAX_PHYSICAL_GPU_NAME 256
+#define GR_MAX_MEMORY_HEAPS 8
+#define GR_MAX_DESCRIPTOR_SETS 2
+#define GR_MAX_VIEWPORTS 16
+#define GR_MAX_COLOR_TARGETS 8
+
+#define GR_LOD_CLAMP_NONE MAX_FLOAT
+#define GR_LAST_MIP_OR_SLICE 0xffffffff
+
+#define GR_TRUE 1
+#define GR_FALSE 0
+
+#define GR_NULL_HANDLE 0
+
+// ------------------------------------------------------------------------------------------------
+// GR_ENUM values
+
+typedef enum _GR_QUEUE_TYPE
+{
+ GR_QUEUE_UNIVERSAL = 0x1000,
+ GR_QUEUE_COMPUTE_ONLY = 0x1001,
+
+ GR_QUEUE_TYPE_BEGIN_RANGE = GR_QUEUE_UNIVERSAL,
+ GR_QUEUE_TYPE_END_RANGE = GR_QUEUE_COMPUTE_ONLY,
+ GR_NUM_QUEUE_TYPE = (GR_QUEUE_TYPE_END_RANGE - GR_QUEUE_TYPE_BEGIN_RANGE + 1),
+} GR_QUEUE_TYPE;
+
+typedef enum _GR_MEMORY_PRIORITY
+{
+ GR_MEMORY_PRIORITY_NORMAL = 0x1100,
+ GR_MEMORY_PRIORITY_HIGH = 0x1101,
+ GR_MEMORY_PRIORITY_LOW = 0x1102,
+ GR_MEMORY_PRIORITY_UNUSED = 0x1103,
+ GR_MEMORY_PRIORITY_VERY_HIGH = 0x1104,
+ GR_MEMORY_PRIORITY_VERY_LOW = 0x1105,
+
+ GR_MEMORY_PRIORITY_BEGIN_RANGE = GR_MEMORY_PRIORITY_NORMAL,
+ GR_MEMORY_PRIORITY_END_RANGE = GR_MEMORY_PRIORITY_VERY_LOW,
+ GR_NUM_MEMORY_PRIORITY = (GR_MEMORY_PRIORITY_END_RANGE - GR_MEMORY_PRIORITY_BEGIN_RANGE + 1),
+} GR_MEMORY_PRIORITY;
+
+typedef enum _GR_MEMORY_STATE
+{
+ GR_MEMORY_STATE_DATA_TRANSFER = 0x1200,
+ GR_MEMORY_STATE_GRAPHICS_SHADER_READ_ONLY = 0x1201,
+ GR_MEMORY_STATE_GRAPHICS_SHADER_WRITE_ONLY = 0x1202,
+ GR_MEMORY_STATE_GRAPHICS_SHADER_READ_WRITE = 0x1203,
+ GR_MEMORY_STATE_COMPUTE_SHADER_READ_ONLY = 0x1204,
+ GR_MEMORY_STATE_COMPUTE_SHADER_WRITE_ONLY = 0x1205,
+ GR_MEMORY_STATE_COMPUTE_SHADER_READ_WRITE = 0x1206,
+ GR_MEMORY_STATE_MULTI_SHADER_READ_ONLY = 0x1207,
+ GR_MEMORY_STATE_INDEX_DATA = 0x1208,
+ GR_MEMORY_STATE_INDIRECT_ARG = 0x1209,
+ GR_MEMORY_STATE_WRITE_TIMESTAMP = 0x120a,
+ GR_MEMORY_STATE_QUEUE_ATOMIC = 0x120b,
+
+ GR_MEMORY_STATE_BEGIN_RANGE = GR_MEMORY_STATE_DATA_TRANSFER,
+ GR_MEMORY_STATE_END_RANGE = GR_MEMORY_STATE_QUEUE_ATOMIC,
+ GR_NUM_MEMORY_STATE = (GR_MEMORY_STATE_END_RANGE - GR_MEMORY_STATE_BEGIN_RANGE + 1),
+} GR_MEMORY_STATE;
+
+typedef enum _GR_IMAGE_STATE
+{
+ GR_IMAGE_STATE_DATA_TRANSFER = 0x1300,
+ GR_IMAGE_STATE_GRAPHICS_SHADER_READ_ONLY = 0x1301,
+ GR_IMAGE_STATE_GRAPHICS_SHADER_WRITE_ONLY = 0x1302,
+ GR_IMAGE_STATE_GRAPHICS_SHADER_READ_WRITE = 0x1303,
+ GR_IMAGE_STATE_COMPUTE_SHADER_READ_ONLY = 0x1304,
+ GR_IMAGE_STATE_COMPUTE_SHADER_WRITE_ONLY = 0x1305,
+ GR_IMAGE_STATE_COMPUTE_SHADER_READ_WRITE = 0x1306,
+ GR_IMAGE_STATE_MULTI_SHADER_READ_ONLY = 0x1307,
+ GR_IMAGE_STATE_TARGET_AND_SHADER_READ_ONLY = 0x1308,
+ GR_IMAGE_STATE_UNINITIALIZED_TARGET = 0x1309,
+ GR_IMAGE_STATE_TARGET_RENDER_ACCESS_OPTIMAL = 0x130a,
+ GR_IMAGE_STATE_TARGET_SHADER_ACCESS_OPTIMAL = 0x130b,
+ GR_IMAGE_STATE_CLEAR = 0x130c,
+ GR_IMAGE_STATE_RESOLVE_SOURCE = 0x130d,
+ GR_IMAGE_STATE_RESOLVE_DESTINATION = 0x130e,
+
+ GR_IMAGE_STATE_BEGIN_RANGE = GR_IMAGE_STATE_DATA_TRANSFER,
+ GR_IMAGE_STATE_END_RANGE = GR_IMAGE_STATE_RESOLVE_DESTINATION,
+ GR_NUM_IMAGE_STATE = (GR_IMAGE_STATE_END_RANGE - GR_IMAGE_STATE_BEGIN_RANGE + 1),
+} GR_IMAGE_STATE;
+
+typedef enum _GR_IMAGE_TYPE
+{
+ GR_IMAGE_1D = 0x1400,
+ GR_IMAGE_2D = 0x1401,
+ GR_IMAGE_3D = 0x1402,
+
+ GR_IMAGE_TYPE_BEGIN_RANGE = GR_IMAGE_1D,
+ GR_IMAGE_TYPE_END_RANGE = GR_IMAGE_3D,
+ GR_NUM_IMAGE_TYPE = (GR_IMAGE_TYPE_END_RANGE - GR_IMAGE_TYPE_BEGIN_RANGE + 1),
+} GR_IMAGE_TYPE;
+
+typedef enum _GR_IMAGE_TILING
+{
+ GR_LINEAR_TILING = 0x1500,
+ GR_OPTIMAL_TILING = 0x1501,
+
+ GR_IMAGE_TILING_BEGIN_RANGE = GR_LINEAR_TILING,
+ GR_IMAGE_TILING_END_RANGE = GR_OPTIMAL_TILING,
+ GR_NUM_IMAGE_TILING = (GR_IMAGE_TILING_END_RANGE - GR_IMAGE_TILING_BEGIN_RANGE + 1),
+} GR_IMAGE_TILING;
+
+typedef enum _GR_IMAGE_VIEW_TYPE
+{
+ GR_IMAGE_VIEW_1D = 0x1600,
+ GR_IMAGE_VIEW_2D = 0x1601,
+ GR_IMAGE_VIEW_3D = 0x1602,
+ GR_IMAGE_VIEW_CUBE = 0x1603,
+
+ GR_IMAGE_VIEW_TYPE_BEGIN_RANGE = GR_IMAGE_VIEW_1D,
+ GR_IMAGE_VIEW_TYPE_END_RANGE = GR_IMAGE_VIEW_CUBE,
+ GR_NUM_IMAGE_VIEW_TYPE = (GR_IMAGE_VIEW_TYPE_END_RANGE - GR_IMAGE_VIEW_TYPE_BEGIN_RANGE + 1),
+} GR_IMAGE_VIEW_TYPE;
+
+typedef enum _GR_IMAGE_ASPECT
+{
+ GR_IMAGE_ASPECT_COLOR = 0x1700,
+ GR_IMAGE_ASPECT_DEPTH = 0x1701,
+ GR_IMAGE_ASPECT_STENCIL = 0x1702,
+
+ GR_IMAGE_ASPECT_BEGIN_RANGE = GR_IMAGE_ASPECT_COLOR,
+ GR_IMAGE_ASPECT_END_RANGE = GR_IMAGE_ASPECT_STENCIL,
+ GR_NUM_IMAGE_ASPECT = (GR_IMAGE_ASPECT_END_RANGE - GR_IMAGE_ASPECT_BEGIN_RANGE + 1),
+} GR_IMAGE_ASPECT;
+
+typedef enum _GR_CHANNEL_SWIZZLE
+{
+ GR_CHANNEL_SWIZZLE_ZERO = 0x1800,
+ GR_CHANNEL_SWIZZLE_ONE = 0x1801,
+ GR_CHANNEL_SWIZZLE_R = 0x1802,
+ GR_CHANNEL_SWIZZLE_G = 0x1803,
+ GR_CHANNEL_SWIZZLE_B = 0x1804,
+ GR_CHANNEL_SWIZZLE_A = 0x1805,
+
+ GR_CHANNEL_SWIZZLE_BEGIN_RANGE = GR_CHANNEL_SWIZZLE_ZERO,
+ GR_CHANNEL_SWIZZLE_END_RANGE = GR_CHANNEL_SWIZZLE_A,
+ GR_NUM_CHANNEL_SWIZZLE = (GR_CHANNEL_SWIZZLE_END_RANGE - GR_CHANNEL_SWIZZLE_BEGIN_RANGE + 1),
+} GR_CHANNEL_SWIZZLE;
+
+typedef enum _GR_DESCRIPTOR_SET_SLOT_TYPE
+{
+ GR_SLOT_UNUSED = 0x1900,
+ GR_SLOT_SHADER_RESOURCE = 0x1901,
+ GR_SLOT_SHADER_UAV = 0x1902,
+ GR_SLOT_SHADER_SAMPLER = 0x1903,
+ GR_SLOT_NEXT_DESCRIPTOR_SET = 0x1904,
+
+ GR_DESCRIPTOR_SET_SLOT_TYPE_BEGIN_RANGE = GR_SLOT_UNUSED,
+ GR_DESCRIPTOR_SET_SLOT_TYPE_END_RANGE = GR_SLOT_NEXT_DESCRIPTOR_SET,
+ GR_NUM_DESCRIPTOR_SET_SLOT_TYPE = (GR_DESCRIPTOR_SET_SLOT_TYPE_END_RANGE - GR_DESCRIPTOR_SET_SLOT_TYPE_BEGIN_RANGE + 1),
+} GR_DESCRIPTOR_SET_SLOT_TYPE;
+
+typedef enum _GR_QUERY_TYPE
+{
+ GR_QUERY_OCCLUSION = 0x1a00,
+ GR_QUERY_PIPELINE_STATISTICS = 0x1a01,
+
+ GR_QUERY_TYPE_BEGIN_RANGE = GR_QUERY_OCCLUSION,
+ GR_QUERY_TYPE_END_RANGE = GR_QUERY_PIPELINE_STATISTICS,
+ GR_NUM_QUERY_TYPE = (GR_QUERY_TYPE_END_RANGE - GR_QUERY_TYPE_BEGIN_RANGE + 1),
+} GR_QUERY_TYPE;
+
+typedef enum _GR_TIMESTAMP_TYPE
+{
+ GR_TIMESTAMP_TOP = 0x1b00,
+ GR_TIMESTAMP_BOTTOM = 0x1b01,
+
+ GR_TIMESTAMP_TYPE_BEGIN_RANGE = GR_TIMESTAMP_TOP,
+ GR_TIMESTAMP_TYPE_END_RANGE = GR_TIMESTAMP_BOTTOM,
+ GR_NUM_TIMESTAMP_TYPE = (GR_TIMESTAMP_TYPE_END_RANGE - GR_TIMESTAMP_TYPE_BEGIN_RANGE + 1),
+} GR_TIMESTAMP_TYPE;
+
+typedef enum _GR_BORDER_COLOR_TYPE
+{
+ GR_BORDER_COLOR_WHITE = 0x1c00,
+ GR_BORDER_COLOR_TRANSPARENT_BLACK = 0x1c01,
+ GR_BORDER_COLOR_OPAQUE_BLACK = 0x1c02,
+
+ GR_BORDER_COLOR_TYPE_BEGIN_RANGE = GR_BORDER_COLOR_WHITE,
+ GR_BORDER_COLOR_TYPE_END_RANGE = GR_BORDER_COLOR_OPAQUE_BLACK,
+ GR_NUM_BORDER_COLOR_TYPE = (GR_BORDER_COLOR_TYPE_END_RANGE - GR_BORDER_COLOR_TYPE_BEGIN_RANGE + 1),
+} GR_BORDER_COLOR_TYPE;
+
+typedef enum _GR_PIPELINE_BIND_POINT
+{
+ GR_PIPELINE_BIND_POINT_COMPUTE = 0x1e00,
+ GR_PIPELINE_BIND_POINT_GRAPHICS = 0x1e01,
+
+ GR_PIPELINE_BIND_POINT_BEGIN_RANGE = GR_PIPELINE_BIND_POINT_COMPUTE,
+ GR_PIPELINE_BIND_POINT_END_RANGE = GR_PIPELINE_BIND_POINT_GRAPHICS,
+ GR_NUM_PIPELINE_BIND_POINT = (GR_PIPELINE_BIND_POINT_END_RANGE - GR_PIPELINE_BIND_POINT_BEGIN_RANGE + 1),
+} GR_PIPELINE_BIND_POINT;
+
+typedef enum _GR_STATE_BIND_POINT
+{
+ GR_STATE_BIND_VIEWPORT = 0x1f00,
+ GR_STATE_BIND_RASTER = 0x1f01,
+ GR_STATE_BIND_DEPTH_STENCIL = 0x1f02,
+ GR_STATE_BIND_COLOR_BLEND = 0x1f03,
+ GR_STATE_BIND_MSAA = 0x1f04,
+
+ GR_STATE_BIND_POINT_BEGIN_RANGE = GR_STATE_BIND_VIEWPORT,
+ GR_STATE_BIND_POINT_END_RANGE = GR_STATE_BIND_MSAA,
+ GR_NUM_STATE_BIND_POINT = (GR_STATE_BIND_POINT_END_RANGE - GR_STATE_BIND_POINT_BEGIN_RANGE + 1),
+} GR_STATE_BIND_POINT;
+
+typedef enum _GR_PRIMITIVE_TOPOLOGY
+{
+ GR_TOPOLOGY_POINT_LIST = 0x2000,
+ GR_TOPOLOGY_LINE_LIST = 0x2001,
+ GR_TOPOLOGY_LINE_STRIP = 0x2002,
+ GR_TOPOLOGY_TRIANGLE_LIST = 0x2003,
+ GR_TOPOLOGY_TRIANGLE_STRIP = 0x2004,
+ GR_TOPOLOGY_RECT_LIST = 0x2005,
+ GR_TOPOLOGY_QUAD_LIST = 0x2006,
+ GR_TOPOLOGY_QUAD_STRIP = 0x2007,
+ GR_TOPOLOGY_LINE_LIST_ADJ = 0x2008,
+ GR_TOPOLOGY_LINE_STRIP_ADJ = 0x2009,
+ GR_TOPOLOGY_TRIANGLE_LIST_ADJ = 0x200a,
+ GR_TOPOLOGY_TRIANGLE_STRIP_ADJ = 0x200b,
+ GR_TOPOLOGY_PATCH = 0x200c,
+
+ GR_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = GR_TOPOLOGY_POINT_LIST,
+ GR_PRIMITIVE_TOPOLOGY_END_RANGE = GR_TOPOLOGY_PATCH,
+ GR_NUM_PRIMITIVE_TOPOLOGY = (GR_PRIMITIVE_TOPOLOGY_END_RANGE - GR_PRIMITIVE_TOPOLOGY_BEGIN_RANGE + 1),
+} GR_PRIMITIVE_TOPOLOGY;
+
+typedef enum _GR_INDEX_TYPE
+{
+ GR_INDEX_16 = 0x2100,
+ GR_INDEX_32 = 0x2101,
+
+ GR_INDEX_TYPE_BEGIN_RANGE = GR_INDEX_16,
+ GR_INDEX_TYPE_END_RANGE = GR_INDEX_32,
+ GR_NUM_INDEX_TYPE = (GR_INDEX_TYPE_END_RANGE - GR_INDEX_TYPE_BEGIN_RANGE + 1),
+} GR_INDEX_TYPE;
+
+typedef enum _GR_TEX_FILTER
+{
+ GR_TEX_FILTER_BEGIN_RANGE = 0x2300,
+ GR_TEX_FILTER_END_RANGE = 0x23ff,
+
+ GR_TEX_FILTER_MAG_POINT_MIN_POINT_MIP_POINT = GR_TEX_FILTER_BEGIN_RANGE + 0x40,
+ GR_TEX_FILTER_MAG_LINEAR_MIN_POINT_MIP_POINT = GR_TEX_FILTER_BEGIN_RANGE + 0x41,
+ GR_TEX_FILTER_MAG_POINT_MIN_LINEAR_MIP_POINT = GR_TEX_FILTER_BEGIN_RANGE + 0x44,
+ GR_TEX_FILTER_MAG_LINEAR_MIN_LINEAR_MIP_POINT = GR_TEX_FILTER_BEGIN_RANGE + 0x45,
+ GR_TEX_FILTER_MAG_POINT_MIN_POINT_MIP_LINEAR = GR_TEX_FILTER_BEGIN_RANGE + 0x80,
+ GR_TEX_FILTER_MAG_LINEAR_MIN_POINT_MIP_LINEAR = GR_TEX_FILTER_BEGIN_RANGE + 0x81,
+ GR_TEX_FILTER_MAG_POINT_MIN_LINEAR_MIP_LINEAR = GR_TEX_FILTER_BEGIN_RANGE + 0x84,
+ GR_TEX_FILTER_MAG_LINEAR_MIN_LINEAR_MIP_LINEAR = GR_TEX_FILTER_BEGIN_RANGE + 0x85,
+ GR_TEX_FILTER_ANISOTROPIC = GR_TEX_FILTER_BEGIN_RANGE + 0x8f,
+ GR_NUM_TEX_FILTER = (GR_TEX_FILTER_END_RANGE - GR_TEX_FILTER_BEGIN_RANGE + 1),
+} GR_TEX_FILTER;
+
+typedef enum _GR_TEX_ADDRESS
+{
+ GR_TEX_ADDRESS_WRAP = 0x2400,
+ GR_TEX_ADDRESS_MIRROR = 0x2401,
+ GR_TEX_ADDRESS_CLAMP = 0x2402,
+ GR_TEX_ADDRESS_MIRROR_ONCE = 0x2403,
+ GR_TEX_ADDRESS_CLAMP_BORDER = 0x2404,
+
+ GR_TEX_ADDRESS_BEGIN_RANGE = GR_TEX_ADDRESS_WRAP,
+ GR_TEX_ADDRESS_END_RANGE = GR_TEX_ADDRESS_CLAMP_BORDER,
+ GR_NUM_TEX_ADDRESS = (GR_TEX_ADDRESS_END_RANGE - GR_TEX_ADDRESS_BEGIN_RANGE + 1),
+} GR_TEX_ADDRESS;
+
+typedef enum _GR_COMPARE_FUNC
+{
+ GR_COMPARE_NEVER = 0x2500,
+ GR_COMPARE_LESS = 0x2501,
+ GR_COMPARE_EQUAL = 0x2502,
+ GR_COMPARE_LESS_EQUAL = 0x2503,
+ GR_COMPARE_GREATER = 0x2504,
+ GR_COMPARE_NOT_EQUAL = 0x2505,
+ GR_COMPARE_GREATER_EQUAL = 0x2506,
+ GR_COMPARE_ALWAYS = 0x2507,
+
+ GR_COMPARE_FUNC_BEGIN_RANGE = GR_COMPARE_NEVER,
+ GR_COMPARE_FUNC_END_RANGE = GR_COMPARE_ALWAYS,
+ GR_NUM_COMPARE_FUNC = (GR_COMPARE_FUNC_END_RANGE - GR_COMPARE_FUNC_BEGIN_RANGE + 1),
+} GR_COMPARE_FUNC;
+
+typedef enum _GR_FILL_MODE
+{
+ GR_FILL_SOLID = 0x2600,
+ GR_FILL_WIREFRAME = 0x2601,
+
+ GR_FILL_MODE_BEGIN_RANGE = GR_FILL_SOLID,
+ GR_FILL_MODE_END_RANGE = GR_FILL_WIREFRAME,
+ GR_NUM_FILL_MODE = (GR_FILL_MODE_END_RANGE - GR_FILL_MODE_BEGIN_RANGE + 1),
+} GR_FILL_MODE;
+
+typedef enum _GR_CULL_MODE
+{
+ GR_CULL_NONE = 0x2700,
+ GR_CULL_FRONT = 0x2701,
+ GR_CULL_BACK = 0x2702,
+
+ GR_CULL_MODE_BEGIN_RANGE = GR_CULL_NONE,
+ GR_CULL_MODE_END_RANGE = GR_CULL_BACK,
+ GR_NUM_CULL_MODE = (GR_CULL_MODE_END_RANGE - GR_CULL_MODE_BEGIN_RANGE + 1),
+} GR_CULL_MODE;
+
+typedef enum _GR_FACE_ORIENTATION
+{
+ GR_FRONT_FACE_CCW = 0x2800,
+ GR_FRONT_FACE_CW = 0x2801,
+
+ GR_FACE_ORIENTATION_BEGIN_RANGE = GR_FRONT_FACE_CCW,
+ GR_FACE_ORIENTATION_END_RANGE = GR_FRONT_FACE_CW,
+ GR_NUM_FACE_ORIENTATION = (GR_FACE_ORIENTATION_END_RANGE - GR_FACE_ORIENTATION_BEGIN_RANGE + 1),
+} GR_FACE_ORIENTATION;
+
+typedef enum _GR_BLEND
+{
+ GR_BLEND_ZERO = 0x2900,
+ GR_BLEND_ONE = 0x2901,
+ GR_BLEND_SRC_COLOR = 0x2902,
+ GR_BLEND_ONE_MINUS_SRC_COLOR = 0x2903,
+ GR_BLEND_DEST_COLOR = 0x2904,
+ GR_BLEND_ONE_MINUS_DEST_COLOR = 0x2905,
+ GR_BLEND_SRC_ALPHA = 0x2906,
+ GR_BLEND_ONE_MINUS_SRC_ALPHA = 0x2907,
+ GR_BLEND_DEST_ALPHA = 0x2908,
+ GR_BLEND_ONE_MINUS_DEST_ALPHA = 0x2909,
+ GR_BLEND_CONSTANT_COLOR = 0x290a,
+ GR_BLEND_ONE_MINUS_CONSTANT_COLOR = 0x290b,
+ GR_BLEND_CONSTANT_ALPHA = 0x290c,
+ GR_BLEND_ONE_MINUS_CONSTANT_ALPHA = 0x290d,
+ GR_BLEND_SRC_ALPHA_SATURATE = 0x290e,
+ GR_BLEND_SRC1_COLOR = 0x290f,
+ GR_BLEND_ONE_MINUS_SRC1_COLOR = 0x2910,
+ GR_BLEND_SRC1_ALPHA = 0x2911,
+ GR_BLEND_ONE_MINUS_SRC1_ALPHA = 0x2912,
+
+ GR_BLEND_BEGIN_RANGE = GR_BLEND_ZERO,
+ GR_BLEND_END_RANGE = GR_BLEND_ONE_MINUS_SRC1_ALPHA,
+ GR_NUM_BLEND = (GR_BLEND_END_RANGE - GR_BLEND_BEGIN_RANGE + 1),
+} GR_BLEND;
+
+typedef enum _GR_BLEND_FUNC
+{
+ GR_BLEND_FUNC_ADD = 0x2a00,
+ GR_BLEND_FUNC_SUBTRACT = 0x2a01,
+ GR_BLEND_FUNC_REVERSE_SUBTRACT = 0x2a02,
+ GR_BLEND_FUNC_MIN = 0x2a03,
+ GR_BLEND_FUNC_MAX = 0x2a04,
+
+ GR_BLEND_FUNC_BEGIN_RANGE = GR_BLEND_FUNC_ADD,
+ GR_BLEND_FUNC_END_RANGE = GR_BLEND_FUNC_MAX,
+ GR_NUM_BLEND_FUNC = (GR_BLEND_FUNC_END_RANGE - GR_BLEND_FUNC_BEGIN_RANGE + 1),
+} GR_BLEND_FUNC;
+
+typedef enum _GR_STENCIL_OP
+{
+ GR_STENCIL_OP_KEEP = 0x2b00,
+ GR_STENCIL_OP_ZERO = 0x2b01,
+ GR_STENCIL_OP_REPLACE = 0x2b02,
+ GR_STENCIL_OP_INC_CLAMP = 0x2b03,
+ GR_STENCIL_OP_DEC_CLAMP = 0x2b04,
+ GR_STENCIL_OP_INVERT = 0x2b05,
+ GR_STENCIL_OP_INC_WRAP = 0x2b06,
+ GR_STENCIL_OP_DEC_WRAP = 0x2b07,
+
+ GR_STENCIL_OP_BEGIN_RANGE = GR_STENCIL_OP_KEEP,
+ GR_STENCIL_OP_END_RANGE = GR_STENCIL_OP_DEC_WRAP,
+ GR_NUM_STENCIL_OP = (GR_STENCIL_OP_END_RANGE - GR_STENCIL_OP_BEGIN_RANGE + 1),
+} GR_STENCIL_OP;
+
+typedef enum _GR_LOGIC_OP
+{
+ GR_LOGIC_OP_COPY = 0x2c00,
+ GR_LOGIC_OP_CLEAR = 0x2c01,
+ GR_LOGIC_OP_AND = 0x2c02,
+ GR_LOGIC_OP_AND_REVERSE = 0x2c03,
+ GR_LOGIC_OP_AND_INVERTED = 0x2c04,
+ GR_LOGIC_OP_NOOP = 0x2c05,
+ GR_LOGIC_OP_XOR = 0x2c06,
+ GR_LOGIC_OP_OR = 0x2c07,
+ GR_LOGIC_OP_NOR = 0x2c08,
+ GR_LOGIC_OP_EQUIV = 0x2c09,
+ GR_LOGIC_OP_INVERT = 0x2c0a,
+ GR_LOGIC_OP_OR_REVERSE = 0x2c0b,
+ GR_LOGIC_OP_COPY_INVERTED = 0x2c0c,
+ GR_LOGIC_OP_OR_INVERTED = 0x2c0d,
+ GR_LOGIC_OP_NAND = 0x2c0e,
+ GR_LOGIC_OP_SET = 0x2c0f,
+
+ GR_LOGIC_OP_BEGIN_RANGE = GR_LOGIC_OP_COPY,
+ GR_LOGIC_OP_END_RANGE = GR_LOGIC_OP_SET,
+ GR_NUM_LOGIC_OP = (GR_LOGIC_OP_END_RANGE - GR_LOGIC_OP_BEGIN_RANGE + 1),
+} GR_LOGIC_OP;
+
+typedef enum _GR_ATOMIC_OP
+{
+ GR_ATOMIC_ADD_INT32 = 0x2d00,
+ GR_ATOMIC_SUB_INT32 = 0x2d01,
+ GR_ATOMIC_MIN_UINT32 = 0x2d02,
+ GR_ATOMIC_MAX_UINT32 = 0x2d03,
+ GR_ATOMIC_MIN_SINT32 = 0x2d04,
+ GR_ATOMIC_MAX_SINT32 = 0x2d05,
+ GR_ATOMIC_AND_INT32 = 0x2d06,
+ GR_ATOMIC_OR_INT32 = 0x2d07,
+ GR_ATOMIC_XOR_INT32 = 0x2d08,
+ GR_ATOMIC_INC_UINT32 = 0x2d09,
+ GR_ATOMIC_DEC_UINT32 = 0x2d0a,
+ GR_ATOMIC_ADD_INT64 = 0x2d0b,
+ GR_ATOMIC_SUB_INT64 = 0x2d0c,
+ GR_ATOMIC_MIN_UINT64 = 0x2d0d,
+ GR_ATOMIC_MAX_UINT64 = 0x2d0e,
+ GR_ATOMIC_MIN_SINT64 = 0x2d0f,
+ GR_ATOMIC_MAX_SINT64 = 0x2d10,
+ GR_ATOMIC_AND_INT64 = 0x2d11,
+ GR_ATOMIC_OR_INT64 = 0x2d12,
+ GR_ATOMIC_XOR_INT64 = 0x2d13,
+ GR_ATOMIC_INC_UINT64 = 0x2d14,
+ GR_ATOMIC_DEC_UINT64 = 0x2d15,
+
+ GR_ATOMIC_OP_BEGIN_RANGE = GR_ATOMIC_ADD_INT32,
+ GR_ATOMIC_OP_END_RANGE = GR_ATOMIC_DEC_UINT64,
+ GR_NUM_ATOMIC_OP = (GR_ATOMIC_OP_END_RANGE - GR_ATOMIC_OP_BEGIN_RANGE + 1),
+} GR_ATOMIC_OP;
+
+typedef enum _GR_SYSTEM_ALLOC_TYPE
+{
+ GR_SYSTEM_ALLOC_API_OBJECT = 0x2e00,
+ GR_SYSTEM_ALLOC_INTERNAL = 0x2e01,
+ GR_SYSTEM_ALLOC_INTERNAL_TEMP = 0x2e02,
+ GR_SYSTEM_ALLOC_INTERNAL_SHADER = 0x2e03,
+ GR_SYSTEM_ALLOC_DEBUG = 0x2e04,
+
+ GR_SYSTEM_ALLOC_BEGIN_RANGE = GR_SYSTEM_ALLOC_API_OBJECT,
+ GR_SYSTEM_ALLOC_END_RANGE = GR_SYSTEM_ALLOC_DEBUG,
+ GR_NUM_SYSTEM_ALLOC_TYPE = (GR_SYSTEM_ALLOC_END_RANGE - GR_SYSTEM_ALLOC_BEGIN_RANGE + 1),
+} GR_SYSTEM_ALLOC_TYPE;
+
+typedef enum _GR_HEAP_MEMORY_TYPE
+{
+ GR_HEAP_MEMORY_OTHER = 0x2f00,
+ GR_HEAP_MEMORY_LOCAL = 0x2f01,
+ GR_HEAP_MEMORY_REMOTE = 0x2f02,
+ GR_HEAP_MEMORY_EMBEDDED = 0x2f03,
+
+ GR_HEAP_MEMORY_BEGIN_RANGE = GR_HEAP_MEMORY_OTHER,
+ GR_HEAP_MEMORY_END_RANGE = GR_HEAP_MEMORY_EMBEDDED,
+ GR_NUM_HEAP_MEMORY_TYPE = (GR_HEAP_MEMORY_END_RANGE - GR_HEAP_MEMORY_BEGIN_RANGE + 1),
+} GR_HEAP_MEMORY_TYPE;
+
+typedef enum _GR_PHYSICAL_GPU_TYPE
+{
+ GR_GPU_TYPE_OTHER = 0x3000,
+ GR_GPU_TYPE_INTEGRATED = 0x3001,
+ GR_GPU_TYPE_DISCRETE = 0x3002,
+ GR_GPU_TYPE_VIRTUAL = 0x3003,
+
+ GR_PHYSICAL_GPU_TYPE_BEGIN_RANGE = GR_GPU_TYPE_OTHER,
+ GR_PHYSICAL_GPU_TYPE_END_RANGE = GR_GPU_TYPE_VIRTUAL,
+ GR_NUM_PHYSICAL_GPU_TYPE = (GR_PHYSICAL_GPU_TYPE_END_RANGE - GR_PHYSICAL_GPU_TYPE_BEGIN_RANGE + 1),
+} GR_PHYSICAL_GPU_TYPE;
+
+typedef enum _GR_INFO_TYPE
+{
+ // Info type for grGetGpuInfo()
+ GR_INFO_TYPE_PHYSICAL_GPU_PROPERTIES = 0x6100,
+ GR_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE = 0x6101,
+ GR_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES = 0x6102,
+ GR_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES = 0x6103,
+ // Info type for grGetMemoryHeapInfo()
+ GR_INFO_TYPE_MEMORY_HEAP_PROPERTIES = 0x6200,
+ // Info type for grGetFormatInfo()
+ GR_INFO_TYPE_FORMAT_PROPERTIES = 0x6300,
+ // Info type for grGetImageSubresourceInfo()
+ GR_INFO_TYPE_SUBRESOURCE_LAYOUT = 0x6400,
+ // Info type for grGetObjectInfo()
+ GR_INFO_TYPE_MEMORY_REQUIREMENTS = 0x6800,
+} GR_INFO_TYPE;
+
+typedef enum _GR_VALIDATION_LEVEL
+{
+ GR_VALIDATION_LEVEL_0 = 0x8000,
+ GR_VALIDATION_LEVEL_1 = 0x8001,
+ GR_VALIDATION_LEVEL_2 = 0x8002,
+ GR_VALIDATION_LEVEL_3 = 0x8003,
+ GR_VALIDATION_LEVEL_4 = 0x8004,
+
+ GR_VALIDATION_LEVEL_BEGIN_RANGE = GR_VALIDATION_LEVEL_0,
+ GR_VALIDATION_LEVEL_END_RANGE = GR_VALIDATION_LEVEL_4,
+ GR_NUM_VALIDATION_LEVEL = (GR_VALIDATION_LEVEL_END_RANGE - GR_VALIDATION_LEVEL_BEGIN_RANGE + 1),
+} GR_VALIDATION_LEVEL;
+
+// ------------------------------------------------------------------------------------------------
+// Error and return codes
+
+typedef enum _GR_RESULT_CODE
+{
+ // Return codes for successful operation execution
+ GR_SUCCESS = 0x10000,
+ GR_UNSUPPORTED = 0x10001,
+ GR_NOT_READY = 0x10002,
+ GR_TIMEOUT = 0x10003,
+ GR_EVENT_SET = 0x10004,
+ GR_EVENT_RESET = 0x10005,
+ // Error codes
+ GR_ERROR_UNKNOWN = 0x11000,
+ GR_ERROR_UNAVAILABLE = 0x11001,
+ GR_ERROR_INITIALIZATION_FAILED = 0x11002,
+ GR_ERROR_OUT_OF_MEMORY = 0x11003,
+ GR_ERROR_OUT_OF_GPU_MEMORY = 0x11004,
+ GR_ERROR_DEVICE_ALREADY_CREATED = 0x11005,
+ GR_ERROR_DEVICE_LOST = 0x11006,
+ GR_ERROR_INVALID_POINTER = 0x11007,
+ GR_ERROR_INVALID_VALUE = 0x11008,
+ GR_ERROR_INVALID_HANDLE = 0x11009,
+ GR_ERROR_INVALID_ORDINAL = 0x1100a,
+ GR_ERROR_INVALID_MEMORY_SIZE = 0x1100b,
+ GR_ERROR_INVALID_EXTENSION = 0x1100c,
+ GR_ERROR_INVALID_FLAGS = 0x1100d,
+ GR_ERROR_INVALID_ALIGNMENT = 0x1100e,
+ GR_ERROR_INVALID_FORMAT = 0x1100f,
+ GR_ERROR_INVALID_IMAGE = 0x11010,
+ GR_ERROR_INVALID_DESCRIPTOR_SET_DATA = 0x11011,
+ GR_ERROR_INVALID_QUEUE_TYPE = 0x11012,
+ GR_ERROR_INVALID_OBJECT_TYPE = 0x11013,
+ GR_ERROR_UNSUPPORTED_SHADER_IL_VERSION = 0x11014,
+ GR_ERROR_BAD_SHADER_CODE = 0x11015,
+ GR_ERROR_BAD_PIPELINE_DATA = 0x11016,
+ GR_ERROR_TOO_MANY_MEMORY_REFERENCES = 0x11017,
+ GR_ERROR_NOT_MAPPABLE = 0x11018,
+ GR_ERROR_MEMORY_MAP_FAILED = 0x11019,
+ GR_ERROR_MEMORY_UNMAP_FAILED = 0x1101a,
+ GR_ERROR_INCOMPATIBLE_DEVICE = 0x1101b,
+ GR_ERROR_INCOMPATIBLE_DRIVER = 0x1101c,
+ GR_ERROR_INCOMPLETE_COMMAND_BUFFER = 0x1101d,
+ GR_ERROR_BUILDING_COMMAND_BUFFER = 0x1101e,
+ GR_ERROR_MEMORY_NOT_BOUND = 0x1101f,
+ GR_ERROR_INCOMPATIBLE_QUEUE = 0x11020,
+ GR_ERROR_NOT_SHAREABLE = 0x11021,
+} GR_RESULT_CODE;
+
+// ------------------------------------------------------------------------------------------------
+// Mantle format definition
+
+typedef enum _GR_CHANNEL_FORMAT
+{
+ GR_CH_FMT_UNDEFINED = 0,
+ GR_CH_FMT_R4G4 = 1,
+ GR_CH_FMT_R4G4B4A4 = 2,
+ GR_CH_FMT_R5G6B5 = 3,
+ GR_CH_FMT_B5G6R5 = 4,
+ GR_CH_FMT_R5G5B5A1 = 5,
+ GR_CH_FMT_R8 = 6,
+ GR_CH_FMT_R8G8 = 7,
+ GR_CH_FMT_R8G8B8A8 = 8,
+ GR_CH_FMT_B8G8R8A8 = 9,
+ GR_CH_FMT_R10G11B11 = 10,
+ GR_CH_FMT_R11G11B10 = 11,
+ GR_CH_FMT_R10G10B10A2 = 12,
+ GR_CH_FMT_R16 = 13,
+ GR_CH_FMT_R16G16 = 14,
+ GR_CH_FMT_R16G16B16A16 = 15,
+ GR_CH_FMT_R32 = 16,
+ GR_CH_FMT_R32G32 = 17,
+ GR_CH_FMT_R32G32B32 = 18,
+ GR_CH_FMT_R32G32B32A32 = 19,
+ GR_CH_FMT_R16G8 = 20,
+ GR_CH_FMT_R32G8 = 21,
+ GR_CH_FMT_R9G9B9E5 = 22,
+ GR_CH_FMT_BC1 = 23,
+ GR_CH_FMT_BC2 = 24,
+ GR_CH_FMT_BC3 = 25,
+ GR_CH_FMT_BC4 = 26,
+ GR_CH_FMT_BC5 = 27,
+ GR_CH_FMT_BC6U = 28,
+ GR_CH_FMT_BC6S = 29,
+ GR_CH_FMT_BC7 = 30,
+ GR_MAX_CH_FMT = GR_CH_FMT_BC7,
+} GR_CHANNEL_FORMAT;
+
+typedef enum _GR_NUM_FORMAT
+{
+ GR_NUM_FMT_UNDEFINED = 0,
+ GR_NUM_FMT_UNORM = 1,
+ GR_NUM_FMT_SNORM = 2,
+ GR_NUM_FMT_UINT = 3,
+ GR_NUM_FMT_SINT = 4,
+ GR_NUM_FMT_FLOAT = 5,
+ GR_NUM_FMT_SRGB = 6,
+ GR_NUM_FMT_DS = 7,
+ GR_MAX_NUM_FMT = GR_NUM_FMT_DS,
+} GR_NUM_FORMAT;
+
+typedef struct _GR_FORMAT
+{
+ GR_UINT32 channelFormat : 16; // GR_CHANNEL_FORMAT
+ GR_UINT32 numericFormat : 16; // GR_NUM_FORMAT
+} GR_FORMAT;
+
+// ------------------------------------------------------------------------------------------------
+// Flags
+
+// Device creation flags
+typedef enum _GR_DEVICE_CREATE_FLAGS
+{
+ GR_DEVICE_CREATE_VALIDATION = 0x00000001,
+ GR_DEVICE_CREATE_MGPU_IQ_MATCH = 0x00000002,
+} GR_DEVICE_CREATE_FLAGS;
+
+// Memory heap properties
+typedef enum _GR_MEMORY_HEAP_FLAGS
+{
+ GR_MEMORY_HEAP_CPU_VISIBLE = 0x00000001,
+ GR_MEMORY_HEAP_CPU_GPU_COHERENT = 0x00000002,
+ GR_MEMORY_HEAP_CPU_UNCACHED = 0x00000004,
+ GR_MEMORY_HEAP_CPU_WRITE_COMBINED = 0x00000008,
+ GR_MEMORY_HEAP_HOLDS_PINNED = 0x00000010,
+ GR_MEMORY_HEAP_SHAREABLE = 0x00000020,
+} GR_MEMORY_HEAP_FLAGS;
+
+// Memory allocation flags
+typedef enum _GR_MEMORY_ALLOC_FLAGS
+{
+ GR_MEMORY_ALLOC_VIRTUAL = 0x00000001,
+ GR_MEMORY_ALLOC_SHAREABLE = 0x00000002,
+} GR_MEMORY_ALLOC_FLAGS;
+
+// Image usage flags
+typedef enum _GR_IMAGE_USAGE_FLAGS
+{
+ GR_IMAGE_USAGE_SHADER_ACCESS_READ = 0x00000001,
+ GR_IMAGE_USAGE_SHADER_ACCESS_WRITE = 0x00000002,
+ GR_IMAGE_USAGE_COLOR_TARGET = 0x00000004,
+ GR_IMAGE_USAGE_DEPTH_STENCIL = 0x00000008,
+} GR_IMAGE_USAGE_FLAGS;
+
+// Image flags
+typedef enum _GR_IMAGE_CREATE_FLAGS
+{
+ GR_IMAGE_CREATE_INVARIANT_DATA = 0x00000001,
+ GR_IMAGE_CREATE_CLONEABLE = 0x00000002,
+ GR_IMAGE_CREATE_SHAREABLE = 0x00000004,
+} GR_IMAGE_CREATE_FLAGS;
+
+// Depth-stencil view creation flags
+typedef enum _GR_DEPTH_STENCIL_VIEW_CREATE_FLAGS
+{
+ GR_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_DEPTH = 0x00000001,
+ GR_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_STENCIL = 0x00000002,
+} GR_DEPTH_STENCIL_VIEW_CREATE_FLAGS;
+
+// Pipeline creation flags
+typedef enum _GR_PIPELINE_CREATE_FLAGS
+{
+ GR_PIPELINE_CREATE_DISABLE_OPTIMIZATION = 0x00000001,
+} GR_PIPELINE_CREATE_FLAGS;
+
+// Semaphore creation flags
+typedef enum _GR_SEMAPHORE_CREATE_FLAGS
+{
+ GR_SEMAPHORE_CREATE_SHAREABLE = 0x00000001,
+} GR_SEMAPHORE_CREATE_FLAGS;
+
+// Memory reference flags
+typedef enum _GR_MEMORY_REF_FLAGS
+{
+ GR_MEMORY_REF_READ_ONLY = 0x00000001,
+} GR_MEMORY_REF_FLAGS;
+
+// Format capability flags
+typedef enum _GR_FORMAT_FEATURE_FLAGS
+{
+ GR_FORMAT_IMAGE_SHADER_READ = 0x00000001,
+ GR_FORMAT_IMAGE_SHADER_WRITE = 0x00000002,
+ GR_FORMAT_IMAGE_COPY = 0x00000004,
+ GR_FORMAT_MEMORY_SHADER_ACCESS = 0x00000008,
+ GR_FORMAT_COLOR_TARGET_WRITE = 0x00000010,
+ GR_FORMAT_COLOR_TARGET_BLEND = 0x00000020,
+ GR_FORMAT_DEPTH_TARGET = 0x00000040,
+ GR_FORMAT_STENCIL_TARGET = 0x00000080,
+ GR_FORMAT_MSAA_TARGET = 0x00000100,
+ GR_FORMAT_CONVERSION = 0x00000200,
+} GR_FORMAT_FEATURE_FLAGS;
+
+// Query flags
+typedef enum _GR_QUERY_CONTROL_FLAGS
+{
+ GR_QUERY_IMPRECISE_DATA = 0x00000001,
+} GR_QUERY_CONTROL_FLAGS;
+
+// GPU compatibility flags
+typedef enum _GR_GPU_COMPATIBILITY_FLAGS
+{
+ GR_GPU_COMPAT_ASIC_FEATURES = 0x00000001,
+ GR_GPU_COMPAT_IQ_MATCH = 0x00000002,
+ GR_GPU_COMPAT_PEER_TRANSFER = 0x00000004,
+ GR_GPU_COMPAT_SHARED_MEMORY = 0x00000008,
+ GR_GPU_COMPAT_SHARED_SYNC = 0x00000010,
+ GR_GPU_COMPAT_SHARED_GPU0_DISPLAY = 0x00000020,
+ GR_GPU_COMPAT_SHARED_GPU1_DISPLAY = 0x00000040,
+} GR_GPU_COMPATIBILITY_FLAGS;
+
+// Command buffer building flags
+typedef enum _GR_CMD_BUFFER_BUILD_FLAGS
+{
+ GR_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH = 0x00000001,
+ GR_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH = 0x00000002,
+ GR_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT = 0x00000004,
+ GR_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH = 0x00000008,
+} GR_CMD_BUFFER_BUILD_FLAGS;
+
+// ------------------------------------------------------------------------------------------------
+// Mantle structures
+
+typedef struct _GR_OFFSET2D
+{
+ GR_INT x;
+ GR_INT y;
+} GR_OFFSET2D;
+
+typedef struct _GR_OFFSET3D
+{
+ GR_INT x;
+ GR_INT y;
+ GR_INT z;
+} GR_OFFSET3D;
+
+typedef struct _GR_EXTENT2D
+{
+ GR_INT width;
+ GR_INT height;
+} GR_EXTENT2D;
+
+typedef struct _GR_EXTENT3D
+{
+ GR_INT width;
+ GR_INT height;
+ GR_INT depth;
+} GR_EXTENT3D;
+
+typedef struct _GR_VIEWPORT
+{
+ GR_FLOAT originX;
+ GR_FLOAT originY;
+ GR_FLOAT width;
+ GR_FLOAT height;
+ GR_FLOAT minDepth;
+ GR_FLOAT maxDepth;
+} GR_VIEWPORT;
+
+typedef struct _GR_RECT
+{
+ GR_OFFSET2D offset;
+ GR_EXTENT2D extent;
+} GR_RECT;
+
+typedef struct _GR_PHYSICAL_GPU_PROPERTIES // Static properties reported for one physical GPU
+{
+ GR_UINT32 apiVersion;
+ GR_UINT32 driverVersion;
+ GR_UINT32 vendorId;
+ GR_UINT32 deviceId;
+ GR_ENUM gpuType; // GR_PHYSICAL_GPU_TYPE
+ GR_CHAR gpuName[GR_MAX_PHYSICAL_GPU_NAME]; // fixed-size name buffer
+ GR_UINT maxMemRefsPerSubmission;
+ GR_GPU_SIZE virtualMemPageSize;
+ GR_GPU_SIZE maxInlineMemoryUpdateSize;
+ GR_UINT maxBoundDescriptorSets;
+ GR_UINT maxThreadGroupSize;
+ GR_UINT64 timestampFrequency; // presumably timestamp ticks per second — confirm with spec
+ GR_BOOL multiColorTargetClears;
+} GR_PHYSICAL_GPU_PROPERTIES;
+
+typedef struct _GR_PHYSICAL_GPU_PERFORMANCE // Per-GPU performance rating figures
+{
+ GR_FLOAT maxGpuClock;
+ GR_FLOAT aluPerClock;
+ GR_FLOAT texPerClock;
+ GR_FLOAT primsPerClock;
+ GR_FLOAT pixelsPerClock;
+} GR_PHYSICAL_GPU_PERFORMANCE;
+
+typedef struct _GR_GPU_COMPATIBILITY_INFO // Result of a GPU-pair compatibility query
+{
+ GR_FLAGS compatibilityFlags; // GR_GPU_COMPATIBILITY_FLAGS
+} GR_GPU_COMPATIBILITY_INFO;
+
+typedef struct _GR_APPLICATION_INFO // Application/engine identification passed at init
+{
+ const GR_CHAR* pAppName;
+ GR_UINT32 appVersion;
+ const GR_CHAR* pEngineName;
+ GR_UINT32 engineVersion;
+ GR_UINT32 apiVersion;
+} GR_APPLICATION_INFO;
+
+typedef GR_VOID* (GR_STDCALL *GR_ALLOC_FUNCTION)( // App-supplied system allocator callback
+ GR_SIZE size,
+ GR_SIZE alignment,
+ GR_ENUM allocType); // GR_SYSTEM_ALLOC_TYPE
+
+typedef GR_VOID (GR_STDCALL *GR_FREE_FUNCTION)( // App-supplied free callback paired with GR_ALLOC_FUNCTION
+ GR_VOID* pMem);
+
+typedef struct _GR_ALLOC_CALLBACKS // Bundle of the two allocation callbacks above
+{
+ GR_ALLOC_FUNCTION pfnAlloc;
+ GR_FREE_FUNCTION pfnFree;
+} GR_ALLOC_CALLBACKS;
+
+typedef struct _GR_DEVICE_QUEUE_CREATE_INFO // Requests queueCount queues of one queue type
+{
+ GR_ENUM queueType; // GR_QUEUE_TYPE
+ GR_UINT queueCount;
+} GR_DEVICE_QUEUE_CREATE_INFO;
+
+typedef struct _GR_DEVICE_CREATE_INFO // Parameters for grCreateDevice
+{
+ GR_UINT queueRecordCount; // number of entries in pRequestedQueues
+ const GR_DEVICE_QUEUE_CREATE_INFO* pRequestedQueues;
+ GR_UINT extensionCount; // number of entries in ppEnabledExtensionNames
+ const GR_CHAR*const* ppEnabledExtensionNames;
+ GR_ENUM maxValidationLevel; // GR_VALIDATION_LEVEL
+ GR_FLAGS flags; // GR_DEVICE_CREATE_FLAGS
+} GR_DEVICE_CREATE_INFO;
+
+typedef struct _GR_PHYSICAL_GPU_QUEUE_PROPERTIES // Capabilities of one queue family
+{
+ GR_ENUM queueType; // GR_QUEUE_TYPE
+ GR_UINT queueCount;
+ GR_UINT maxAtomicCounters;
+ GR_BOOL supportsTimestamps;
+} GR_PHYSICAL_GPU_QUEUE_PROPERTIES;
+
+typedef struct _GR_PHYSICAL_GPU_MEMORY_PROPERTIES // Memory-subsystem capability flags
+{
+ GR_BOOL supportsMigration;
+ GR_BOOL supportsVirtualMemoryRemapping;
+ GR_BOOL supportsPinning;
+} GR_PHYSICAL_GPU_MEMORY_PROPERTIES;
+
+typedef struct _GR_MEMORY_HEAP_PROPERTIES // Properties of one memory heap (see grGetMemoryHeapInfo)
+{
+ GR_ENUM heapMemoryType; // GR_HEAP_MEMORY_TYPE
+ GR_GPU_SIZE heapSize; // Specified in bytes
+ GR_GPU_SIZE pageSize; // Specified in bytes
+ GR_FLAGS flags; // GR_MEMORY_HEAP_FLAGS
+ GR_FLOAT gpuReadPerfRating;
+ GR_FLOAT gpuWritePerfRating;
+ GR_FLOAT cpuReadPerfRating;
+ GR_FLOAT cpuWritePerfRating;
+} GR_MEMORY_HEAP_PROPERTIES;
+
+typedef struct _GR_MEMORY_ALLOC_INFO // Parameters for grAllocMemory
+{
+ GR_GPU_SIZE size;
+ GR_GPU_SIZE alignment;
+ GR_FLAGS flags; // GR_MEMORY_ALLOC_FLAGS
+ GR_UINT heapCount; // number of valid entries in heaps[]
+ GR_UINT heaps[GR_MAX_MEMORY_HEAPS]; // candidate heap ids, presumably in preference order — confirm with spec
+ GR_ENUM memPriority; // GR_MEMORY_PRIORITY
+} GR_MEMORY_ALLOC_INFO;
+
+typedef struct _GR_MEMORY_OPEN_INFO // Parameters for grOpenSharedMemory
+{
+ GR_GPU_MEMORY sharedMem;
+} GR_MEMORY_OPEN_INFO;
+
+typedef struct _GR_PEER_MEMORY_OPEN_INFO // Parameters for grOpenPeerMemory
+{
+ GR_GPU_MEMORY originalMem;
+} GR_PEER_MEMORY_OPEN_INFO;
+
+typedef struct _GR_VIRTUAL_MEMORY_REMAP_RANGE // One page-range remap for grRemapVirtualMemoryPages
+{
+ GR_GPU_MEMORY virtualMem;
+ GR_GPU_SIZE virtualStartPage; // page index, not a byte offset
+ GR_GPU_MEMORY realMem;
+ GR_GPU_SIZE realStartPage; // page index, not a byte offset
+ GR_GPU_SIZE pageCount;
+} GR_VIRTUAL_MEMORY_REMAP_RANGE;
+
+typedef struct _GR_MEMORY_REQUIREMENTS // Output of a memory-requirements query on an object
+{
+ GR_GPU_SIZE size; // Specified in bytes
+ GR_GPU_SIZE alignment; // Specified in bytes
+ GR_UINT heapCount;
+ GR_UINT heaps[GR_MAX_MEMORY_HEAPS];
+} GR_MEMORY_REQUIREMENTS;
+
+typedef struct _GR_FORMAT_PROPERTIES // Per-format feature support, split by tiling mode
+{
+ GR_FLAGS linearTilingFeatures; // GR_FORMAT_FEATURE_FLAGS
+ GR_FLAGS optimalTilingFeatures; // GR_FORMAT_FEATURE_FLAGS
+} GR_FORMAT_PROPERTIES;
+
+typedef struct _GR_MEMORY_VIEW_ATTACH_INFO // Typed view over a GPU memory range, for descriptor attach
+{
+ GR_GPU_MEMORY mem;
+ GR_GPU_SIZE offset; // in bytes, consistent with other GR_GPU_SIZE offsets in this header
+ GR_GPU_SIZE range;
+ GR_GPU_SIZE stride;
+ GR_FORMAT format;
+ GR_ENUM state; // GR_MEMORY_STATE
+} GR_MEMORY_VIEW_ATTACH_INFO;
+
+typedef struct _GR_IMAGE_VIEW_ATTACH_INFO // Image view plus its expected image state, for descriptor attach
+{
+ GR_IMAGE_VIEW view;
+ GR_ENUM state; // GR_IMAGE_STATE
+} GR_IMAGE_VIEW_ATTACH_INFO;
+
+typedef struct _GR_MEMORY_STATE_TRANSITION // Old->new state change over a memory region (grCmdPrepareMemoryRegions)
+{
+ GR_GPU_MEMORY mem;
+ GR_ENUM oldState; // GR_MEMORY_STATE
+ GR_ENUM newState; // GR_MEMORY_STATE
+ GR_GPU_SIZE offset;
+ GR_GPU_SIZE regionSize;
+} GR_MEMORY_STATE_TRANSITION;
+
+typedef struct _GR_IMAGE_SUBRESOURCE // Addresses a single mip level of a single array slice
+{
+ GR_ENUM aspect; // GR_IMAGE_ASPECT
+ GR_UINT mipLevel;
+ GR_UINT arraySlice;
+} GR_IMAGE_SUBRESOURCE;
+
+typedef struct _GR_IMAGE_SUBRESOURCE_RANGE // Addresses a contiguous range of mips and array slices
+{
+ GR_ENUM aspect; // GR_IMAGE_ASPECT
+ GR_UINT baseMipLevel;
+ GR_UINT mipLevels;
+ GR_UINT baseArraySlice;
+ GR_UINT arraySize;
+} GR_IMAGE_SUBRESOURCE_RANGE;
+
+typedef struct _GR_IMAGE_STATE_TRANSITION // Old->new state change over an image subresource range (grCmdPrepareImages)
+{
+ GR_IMAGE image;
+ GR_ENUM oldState; // GR_IMAGE_STATE
+ GR_ENUM newState; // GR_IMAGE_STATE
+ GR_IMAGE_SUBRESOURCE_RANGE subresourceRange;
+} GR_IMAGE_STATE_TRANSITION;
+
+typedef struct _GR_IMAGE_CREATE_INFO // Parameters for grCreateImage
+{
+ GR_ENUM imageType; // GR_IMAGE_TYPE
+ GR_FORMAT format;
+ GR_EXTENT3D extent;
+ GR_UINT mipLevels;
+ GR_UINT arraySize;
+ GR_UINT samples; // MSAA sample count
+ GR_ENUM tiling; // GR_IMAGE_TILING
+ GR_FLAGS usage; // GR_IMAGE_USAGE_FLAGS
+ GR_FLAGS flags; // GR_IMAGE_CREATE_FLAGS
+} GR_IMAGE_CREATE_INFO;
+
+typedef struct _GR_PEER_IMAGE_OPEN_INFO // Parameters for grOpenPeerImage
+{
+ GR_IMAGE originalImage;
+} GR_PEER_IMAGE_OPEN_INFO;
+
+typedef struct _GR_SUBRESOURCE_LAYOUT // Memory layout of one image subresource
+{
+ GR_GPU_SIZE offset; // Specified in bytes
+ GR_GPU_SIZE size; // Specified in bytes
+ GR_GPU_SIZE rowPitch; // Specified in bytes
+ GR_GPU_SIZE depthPitch; // Specified in bytes
+} GR_SUBRESOURCE_LAYOUT;
+
+typedef struct _GR_CHANNEL_MAPPING // Per-channel swizzle for image views
+{
+ GR_ENUM r; // GR_CHANNEL_SWIZZLE
+ GR_ENUM g; // GR_CHANNEL_SWIZZLE
+ GR_ENUM b; // GR_CHANNEL_SWIZZLE
+ GR_ENUM a; // GR_CHANNEL_SWIZZLE
+} GR_CHANNEL_MAPPING;
+
+typedef struct _GR_IMAGE_VIEW_CREATE_INFO // Parameters for grCreateImageView
+{
+ GR_IMAGE image;
+ GR_ENUM viewType; // GR_IMAGE_VIEW_TYPE
+ GR_FORMAT format;
+ GR_CHANNEL_MAPPING channels;
+ GR_IMAGE_SUBRESOURCE_RANGE subresourceRange;
+ GR_FLOAT minLod;
+} GR_IMAGE_VIEW_CREATE_INFO;
+
+typedef struct _GR_COLOR_TARGET_VIEW_CREATE_INFO // Parameters for grCreateColorTargetView (single mip level)
+{
+ GR_IMAGE image;
+ GR_FORMAT format;
+ GR_UINT mipLevel;
+ GR_UINT baseArraySlice;
+ GR_UINT arraySize;
+} GR_COLOR_TARGET_VIEW_CREATE_INFO;
+
+typedef struct _GR_DEPTH_STENCIL_VIEW_CREATE_INFO // Parameters for grCreateDepthStencilView
+{
+ GR_IMAGE image;
+ GR_UINT mipLevel;
+ GR_UINT baseArraySlice;
+ GR_UINT arraySize;
+ GR_FLAGS flags; // GR_DEPTH_STENCIL_VIEW_CREATE_FLAGS
+} GR_DEPTH_STENCIL_VIEW_CREATE_INFO;
+
+typedef struct _GR_COLOR_TARGET_BIND_INFO // Color target + its state, for grCmdBindTargets
+{
+ GR_COLOR_TARGET_VIEW view;
+ GR_ENUM colorTargetState; // GR_IMAGE_STATE
+} GR_COLOR_TARGET_BIND_INFO;
+
+typedef struct _GR_DEPTH_STENCIL_BIND_INFO // Depth/stencil target; depth and stencil states are tracked separately
+{
+ GR_DEPTH_STENCIL_VIEW view;
+ GR_ENUM depthState; // GR_IMAGE_STATE
+ GR_ENUM stencilState; // GR_IMAGE_STATE
+} GR_DEPTH_STENCIL_BIND_INFO;
+
+typedef struct _GR_MEMORY_COPY // One region for grCmdCopyMemory
+{
+ GR_GPU_SIZE srcOffset; // Specified in bytes
+ GR_GPU_SIZE destOffset; // Specified in bytes
+ GR_GPU_SIZE copySize; // Specified in bytes
+} GR_MEMORY_COPY;
+
+typedef struct _GR_IMAGE_COPY // One region for grCmdCopyImage
+{
+ GR_IMAGE_SUBRESOURCE srcSubresource;
+ GR_OFFSET3D srcOffset;
+ GR_IMAGE_SUBRESOURCE destSubresource;
+ GR_OFFSET3D destOffset;
+ GR_EXTENT3D extent;
+} GR_IMAGE_COPY;
+
+typedef struct _GR_MEMORY_IMAGE_COPY // One region for grCmdCopyMemoryToImage / grCmdCopyImageToMemory
+{
+ GR_GPU_SIZE memOffset; // Specified in bytes
+ GR_IMAGE_SUBRESOURCE imageSubresource;
+ GR_OFFSET3D imageOffset;
+ GR_EXTENT3D imageExtent;
+} GR_MEMORY_IMAGE_COPY;
+
+typedef struct _GR_IMAGE_RESOLVE // One 2D region for grCmdResolveImage (MSAA resolve)
+{
+ GR_IMAGE_SUBRESOURCE srcSubresource;
+ GR_OFFSET2D srcOffset;
+ GR_IMAGE_SUBRESOURCE destSubresource;
+ GR_OFFSET2D destOffset;
+ GR_EXTENT2D extent;
+} GR_IMAGE_RESOLVE;
+
+typedef struct _GR_SHADER_CREATE_INFO // Parameters for grCreateShader; pCode is opaque shader IL
+{
+ GR_SIZE codeSize; // Specified in bytes
+ const GR_VOID* pCode;
+ GR_FLAGS flags; // Reserved
+} GR_SHADER_CREATE_INFO;
+
+struct _GR_DESCRIPTOR_SET_MAPPING; // forward declaration: slots may point at a nested mapping level
+
+typedef struct _GR_DESCRIPTOR_SLOT_INFO // One descriptor slot: either a shader entity or a nested set
+{
+ GR_ENUM slotObjectType; // GR_DESCRIPTOR_SET_SLOT_TYPE
+ union // active member is selected by slotObjectType
+ {
+ GR_UINT shaderEntityIndex;// Shader IL slot index for given entity type
+ const struct _GR_DESCRIPTOR_SET_MAPPING* pNextLevelSet; // Pointer to next descriptor set level
+ };
+} GR_DESCRIPTOR_SLOT_INFO;
+
+typedef struct _GR_DESCRIPTOR_SET_MAPPING // Array of slot infos describing one descriptor set level
+{
+ GR_UINT descriptorCount;
+ const GR_DESCRIPTOR_SLOT_INFO* pDescriptorInfo;
+} GR_DESCRIPTOR_SET_MAPPING;
+
+typedef struct _GR_LINK_CONST_BUFFER // Constant buffer data baked in at pipeline link time
+{
+ GR_UINT bufferId;
+ GR_SIZE bufferSize;
+ const GR_VOID* pBufferData;
+} GR_LINK_CONST_BUFFER;
+
+typedef struct _GR_DYNAMIC_MEMORY_VIEW_SLOT_INFO // Shader slot receiving the dynamic memory view binding
+{
+ GR_ENUM slotObjectType; // GR_DESCRIPTOR_SET_SLOT_TYPE
+ GR_UINT shaderEntityIndex;
+} GR_DYNAMIC_MEMORY_VIEW_SLOT_INFO;
+
+typedef struct _GR_PIPELINE_SHADER // One shader stage plus its resource-mapping description
+{
+ GR_SHADER shader;
+ GR_DESCRIPTOR_SET_MAPPING descriptorSetMapping[GR_MAX_DESCRIPTOR_SETS];
+ GR_UINT linkConstBufferCount;
+ const GR_LINK_CONST_BUFFER* pLinkConstBufferInfo;
+ GR_DYNAMIC_MEMORY_VIEW_SLOT_INFO dynamicMemoryViewMapping;
+} GR_PIPELINE_SHADER;
+
+typedef struct _GR_COMPUTE_PIPELINE_CREATE_INFO // Parameters for grCreateComputePipeline (compute stage only)
+{
+ GR_PIPELINE_SHADER cs;
+ GR_FLAGS flags; // GR_PIPELINE_CREATE_FLAGS
+} GR_COMPUTE_PIPELINE_CREATE_INFO;
+
+typedef struct _GR_PIPELINE_IA_STATE // Input-assembler state baked into a graphics pipeline
+{
+ GR_ENUM topology; // GR_PRIMITIVE_TOPOLOGY
+ GR_BOOL disableVertexReuse;
+} GR_PIPELINE_IA_STATE;
+
+typedef struct _GR_PIPELINE_TESS_STATE // Tessellation state baked into a graphics pipeline
+{
+ GR_UINT patchControlPoints;
+ GR_FLOAT optimalTessFactor;
+} GR_PIPELINE_TESS_STATE;
+
+typedef struct _GR_PIPELINE_RS_STATE // Rasterizer state baked into a graphics pipeline
+{
+ GR_BOOL depthClipEnable;
+} GR_PIPELINE_RS_STATE;
+
+typedef struct _GR_PIPELINE_CB_TARGET_STATE // Per-color-target blend/format state (pipeline-level)
+{
+ GR_BOOL blendEnable;
+ GR_FORMAT format;
+ GR_UINT8 channelWriteMask;
+} GR_PIPELINE_CB_TARGET_STATE;
+
+typedef struct _GR_PIPELINE_CB_STATE // Color-blend state baked into a graphics pipeline
+{
+ GR_BOOL alphaToCoverageEnable;
+ GR_BOOL dualSourceBlendEnable;
+ GR_ENUM logicOp; // GR_LOGIC_OP
+ GR_PIPELINE_CB_TARGET_STATE target[GR_MAX_COLOR_TARGETS];
+} GR_PIPELINE_CB_STATE;
+
+typedef struct _GR_PIPELINE_DB_STATE // Depth-buffer format baked into a graphics pipeline
+{
+ GR_FORMAT format;
+} GR_PIPELINE_DB_STATE;
+
+typedef struct _GR_GRAPHICS_PIPELINE_CREATE_INFO // Parameters for grCreateGraphicsPipeline: 5 stages + fixed-function state
+{
+ GR_PIPELINE_SHADER vs; // vertex shader
+ GR_PIPELINE_SHADER hs; // hull shader
+ GR_PIPELINE_SHADER ds; // domain shader
+ GR_PIPELINE_SHADER gs; // geometry shader
+ GR_PIPELINE_SHADER ps; // pixel shader
+ GR_PIPELINE_IA_STATE iaState;
+ GR_PIPELINE_TESS_STATE tessState;
+ GR_PIPELINE_RS_STATE rsState;
+ GR_PIPELINE_CB_STATE cbState;
+ GR_PIPELINE_DB_STATE dbState;
+ GR_FLAGS flags; // GR_PIPELINE_CREATE_FLAGS
+} GR_GRAPHICS_PIPELINE_CREATE_INFO;
+
+typedef struct _GR_SAMPLER_CREATE_INFO // Parameters for grCreateSampler
+{
+ GR_ENUM filter; // GR_TEX_FILTER
+ GR_ENUM addressU; // GR_TEX_ADDRESS
+ GR_ENUM addressV; // GR_TEX_ADDRESS
+ GR_ENUM addressW; // GR_TEX_ADDRESS
+ GR_FLOAT mipLodBias;
+ GR_UINT maxAnisotropy;
+ GR_ENUM compareFunc; // GR_COMPARE_FUNC
+ GR_FLOAT minLod;
+ GR_FLOAT maxLod;
+ GR_ENUM borderColor; // GR_BORDER_COLOR_TYPE
+} GR_SAMPLER_CREATE_INFO;
+
+typedef struct _GR_DESCRIPTOR_SET_CREATE_INFO // Parameters for grCreateDescriptorSet (slot count only)
+{
+ GR_UINT slots;
+} GR_DESCRIPTOR_SET_CREATE_INFO;
+
+typedef struct _GR_DESCRIPTOR_SET_ATTACH_INFO // Nested set reference for grAttachNestedDescriptors
+{
+ GR_DESCRIPTOR_SET descriptorSet;
+ GR_UINT slotOffset;
+} GR_DESCRIPTOR_SET_ATTACH_INFO;
+
+typedef struct _GR_VIEWPORT_STATE_CREATE_INFO // Parameters for grCreateViewportState
+{
+ GR_UINT viewportCount; // number of valid entries in viewports[]/scissors[]
+ GR_BOOL scissorEnable;
+ GR_VIEWPORT viewports[GR_MAX_VIEWPORTS];
+ GR_RECT scissors[GR_MAX_VIEWPORTS];
+} GR_VIEWPORT_STATE_CREATE_INFO;
+
+typedef struct _GR_RASTER_STATE_CREATE_INFO // Parameters for grCreateRasterState
+{
+ GR_ENUM fillMode; // GR_FILL_MODE
+ GR_ENUM cullMode; // GR_CULL_MODE
+ GR_ENUM frontFace; // GR_FACE_ORIENTATION
+ GR_INT depthBias;
+ GR_FLOAT depthBiasClamp;
+ GR_FLOAT slopeScaledDepthBias;
+} GR_RASTER_STATE_CREATE_INFO;
+
+typedef struct _GR_MSAA_STATE_CREATE_INFO // Parameters for grCreateMsaaState
+{
+ GR_UINT samples;
+ GR_SAMPLE_MASK sampleMask;
+} GR_MSAA_STATE_CREATE_INFO;
+
+typedef struct _GR_COLOR_TARGET_BLEND_STATE // Per-target blend equation (dynamic state object)
+{
+ GR_BOOL blendEnable;
+ GR_ENUM srcBlendColor; // GR_BLEND
+ GR_ENUM destBlendColor; // GR_BLEND
+ GR_ENUM blendFuncColor; // GR_BLEND_FUNC
+ GR_ENUM srcBlendAlpha; // GR_BLEND
+ GR_ENUM destBlendAlpha; // GR_BLEND
+ GR_ENUM blendFuncAlpha; // GR_BLEND_FUNC
+} GR_COLOR_TARGET_BLEND_STATE;
+
+typedef struct _GR_COLOR_BLEND_STATE_CREATE_INFO // Parameters for grCreateColorBlendState
+{
+ GR_COLOR_TARGET_BLEND_STATE target[GR_MAX_COLOR_TARGETS];
+ GR_FLOAT blendConst[4]; // RGBA blend constant
+} GR_COLOR_BLEND_STATE_CREATE_INFO;
+
+typedef struct _GR_DEPTH_STENCIL_OP // Stencil operation set for one face (front or back)
+{
+ GR_ENUM stencilFailOp; // GR_STENCIL_OP
+ GR_ENUM stencilPassOp; // GR_STENCIL_OP
+ GR_ENUM stencilDepthFailOp; // GR_STENCIL_OP
+ GR_ENUM stencilFunc; // GR_COMPARE_FUNC
+ GR_UINT8 stencilRef;
+} GR_DEPTH_STENCIL_OP;
+
+typedef struct _GR_DEPTH_STENCIL_STATE_CREATE_INFO // Parameters for grCreateDepthStencilState
+{
+ GR_BOOL depthEnable;
+ GR_BOOL depthWriteEnable;
+ GR_ENUM depthFunc; // GR_COMPARE_FUNC
+ GR_BOOL depthBoundsEnable;
+ GR_FLOAT minDepth;
+ GR_FLOAT maxDepth;
+ GR_BOOL stencilEnable;
+ GR_UINT8 stencilReadMask;
+ GR_UINT8 stencilWriteMask;
+ GR_DEPTH_STENCIL_OP front;
+ GR_DEPTH_STENCIL_OP back;
+} GR_DEPTH_STENCIL_STATE_CREATE_INFO;
+
+typedef struct _GR_CMD_BUFFER_CREATE_INFO // Parameters for grCreateCommandBuffer; queueType selects the target queue family
+{
+ GR_ENUM queueType; // GR_QUEUE_TYPE
+ GR_FLAGS flags; // Reserved
+} GR_CMD_BUFFER_CREATE_INFO;
+
+typedef struct _GR_MEMORY_REF // Memory reference listed at submit time (grQueueSubmit)
+{
+ GR_GPU_MEMORY mem;
+ GR_FLAGS flags; // GR_MEMORY_REF_FLAGS
+} GR_MEMORY_REF;
+
+typedef struct _GR_EVENT_CREATE_INFO // Parameters for grCreateEvent
+{
+ GR_FLAGS flags; // Reserved
+} GR_EVENT_CREATE_INFO;
+
+typedef struct _GR_FENCE_CREATE_INFO // Parameters for grCreateFence
+{
+ GR_FLAGS flags; // Reserved
+} GR_FENCE_CREATE_INFO;
+
+typedef struct _GR_QUEUE_SEMAPHORE_CREATE_INFO // Parameters for grCreateQueueSemaphore (counting semaphore)
+{
+ GR_UINT initialCount;
+ GR_FLAGS flags; // GR_SEMAPHORE_CREATE_FLAGS
+} GR_QUEUE_SEMAPHORE_CREATE_INFO;
+
+typedef struct _GR_QUEUE_SEMAPHORE_OPEN_INFO // Parameters for grOpenSharedQueueSemaphore
+{
+ GR_QUEUE_SEMAPHORE sharedSemaphore;
+} GR_QUEUE_SEMAPHORE_OPEN_INFO;
+
+typedef struct _GR_PIPELINE_STATISTICS_DATA // Layout of GR_QUERY_PIPELINE_STATISTICS results
+{
+ GR_UINT64 psInvocations; // Pixel shader invocations
+ GR_UINT64 cPrimitives; // Clipper primitives
+ GR_UINT64 cInvocations; // Clipper invocations
+ GR_UINT64 vsInvocations; // Vertex shader invocations
+ GR_UINT64 gsInvocations; // Geometry shader invocations
+ GR_UINT64 gsPrimitives; // Geometry shader primitives
+ GR_UINT64 iaPrimitives; // Input primitives
+ GR_UINT64 iaVertices; // Input vertices
+ GR_UINT64 hsInvocations; // Hull shader invocations
+ GR_UINT64 dsInvocations; // Domain shader invocations
+ GR_UINT64 csInvocations; // Compute shader invocations
+} GR_PIPELINE_STATISTICS_DATA;
+
+typedef struct _GR_QUERY_POOL_CREATE_INFO // Parameters for grCreateQueryPool
+{
+ GR_ENUM queryType; // GR_QUERY_TYPE
+ GR_UINT slots;
+} GR_QUERY_POOL_CREATE_INFO;
+
+typedef struct _GR_DRAW_INDIRECT_ARG // GPU-memory argument layout for grCmdDrawIndirect
+{
+ GR_UINT32 vertexCount;
+ GR_UINT32 instanceCount;
+ GR_UINT32 firstVertex;
+ GR_UINT32 firstInstance;
+} GR_DRAW_INDIRECT_ARG;
+
+typedef struct _GR_DRAW_INDEXED_INDIRECT_ARG // GPU-memory argument layout for grCmdDrawIndexedIndirect
+{
+ GR_UINT32 indexCount;
+ GR_UINT32 instanceCount;
+ GR_UINT32 firstIndex;
+ GR_INT32 vertexOffset; // signed: may rebase indices downward
+ GR_UINT32 firstInstance;
+} GR_DRAW_INDEXED_INDIRECT_ARG;
+
+typedef struct _GR_DISPATCH_INDIRECT_ARG // GPU-memory argument layout for grCmdDispatchIndirect
+{
+ GR_UINT32 x;
+ GR_UINT32 y;
+ GR_UINT32 z;
+} GR_DISPATCH_INDIRECT_ARG;
+
+// ------------------------------------------------------------------------------------------------
+// API functions
+
+// GPU initialization
+
+GR_RESULT GR_STDCALL grInitAndEnumerateGpus( // Initializes the API and reports available physical GPUs
+ const GR_APPLICATION_INFO* pAppInfo,
+ const GR_ALLOC_CALLBACKS* pAllocCb, // optional app allocator callbacks — NULL handling not visible here; confirm with spec
+ GR_UINT* pGpuCount, // out: number of entries written to gpus[]
+ GR_PHYSICAL_GPU gpus[GR_MAX_PHYSICAL_GPUS]);
+
+GR_RESULT GR_STDCALL grGetGpuInfo( // Generic query; pData layout depends on infoType
+ GR_PHYSICAL_GPU gpu,
+ GR_ENUM infoType, // GR_INFO_TYPE
+ GR_SIZE* pDataSize, // in/out size pattern shared by all grGet*Info queries
+ GR_VOID* pData);
+
+// Device functions
+
+GR_RESULT GR_STDCALL grCreateDevice( // Creates a logical device on a physical GPU
+ GR_PHYSICAL_GPU gpu,
+ const GR_DEVICE_CREATE_INFO* pCreateInfo,
+ GR_DEVICE* pDevice); // out: device handle
+
+GR_RESULT GR_STDCALL grDestroyDevice(
+ GR_DEVICE device);
+
+// Extension discovery functions
+
+GR_RESULT GR_STDCALL grGetExtensionSupport( // Result code presumably indicates support for pExtName — confirm with spec
+ GR_PHYSICAL_GPU gpu,
+ const GR_CHAR* pExtName);
+
+// Queue functions
+
+GR_RESULT GR_STDCALL grGetDeviceQueue( // Retrieves (does not create) a device queue handle
+ GR_DEVICE device,
+ GR_ENUM queueType, // GR_QUEUE_TYPE
+ GR_UINT queueIndex,
+ GR_QUEUE* pQueue); // out: queue handle
+
+GR_RESULT GR_STDCALL grQueueSubmit( // Submits command buffers with their memory references; fence is optional sync
+ GR_QUEUE queue,
+ GR_UINT cmdBufferCount,
+ const GR_CMD_BUFFER* pCmdBuffers,
+ GR_UINT memRefCount,
+ const GR_MEMORY_REF* pMemRefs,
+ GR_FENCE fence);
+
+GR_RESULT GR_STDCALL grQueueSetGlobalMemReferences( // Sets memory references applying to all submissions on the queue
+ GR_QUEUE queue,
+ GR_UINT memRefCount,
+ const GR_MEMORY_REF* pMemRefs);
+
+GR_RESULT GR_STDCALL grQueueWaitIdle( // Blocks until the queue drains
+ GR_QUEUE queue);
+
+GR_RESULT GR_STDCALL grDeviceWaitIdle( // Blocks until all device queues drain
+ GR_DEVICE device);
+
+// Memory functions
+
+GR_RESULT GR_STDCALL grGetMemoryHeapCount(
+ GR_DEVICE device,
+ GR_UINT* pCount); // out: number of heaps queryable via grGetMemoryHeapInfo
+
+GR_RESULT GR_STDCALL grGetMemoryHeapInfo( // Typical result layout: GR_MEMORY_HEAP_PROPERTIES (by infoType)
+ GR_DEVICE device,
+ GR_UINT heapId, // index in [0, heap count)
+ GR_ENUM infoType, // GR_INFO_TYPE
+ GR_SIZE* pDataSize,
+ GR_VOID* pData);
+
+GR_RESULT GR_STDCALL grAllocMemory( // Allocates GPU memory per GR_MEMORY_ALLOC_INFO
+ GR_DEVICE device,
+ const GR_MEMORY_ALLOC_INFO* pAllocInfo,
+ GR_GPU_MEMORY* pMem); // out: memory handle
+
+GR_RESULT GR_STDCALL grFreeMemory(
+ GR_GPU_MEMORY mem);
+
+GR_RESULT GR_STDCALL grSetMemoryPriority(
+ GR_GPU_MEMORY mem,
+ GR_ENUM priority); // GR_MEMORY_PRIORITY
+
+GR_RESULT GR_STDCALL grMapMemory( // Maps GPU memory for CPU access; pair with grUnmapMemory
+ GR_GPU_MEMORY mem,
+ GR_FLAGS flags, // Reserved
+ GR_VOID** ppData); // out: CPU pointer to the mapped range
+
+GR_RESULT GR_STDCALL grUnmapMemory(
+ GR_GPU_MEMORY mem);
+
+GR_RESULT GR_STDCALL grPinSystemMemory( // Wraps existing CPU memory as a GPU memory object
+ GR_DEVICE device,
+ const GR_VOID* pSysMem,
+ GR_SIZE memSize,
+ GR_GPU_MEMORY* pMem); // out: handle referencing the pinned range
+
+GR_RESULT GR_STDCALL grRemapVirtualMemoryPages( // Rebinds virtual pages to real memory, fenced by queue semaphores
+ GR_DEVICE device,
+ GR_UINT rangeCount,
+ const GR_VIRTUAL_MEMORY_REMAP_RANGE* pRanges,
+ GR_UINT preWaitSemaphoreCount,
+ const GR_QUEUE_SEMAPHORE* pPreWaitSemaphores, // waited before the remap takes effect
+ GR_UINT postSignalSemaphoreCount,
+ const GR_QUEUE_SEMAPHORE* pPostSignalSemaphores); // signaled after the remap completes
+
+// Multi-device functions
+
+GR_RESULT GR_STDCALL grGetMultiGpuCompatibility( // Queries what the gpu0/gpu1 pair can share
+ GR_PHYSICAL_GPU gpu0,
+ GR_PHYSICAL_GPU gpu1,
+ GR_GPU_COMPATIBILITY_INFO* pInfo); // out: compatibility flags
+
+GR_RESULT GR_STDCALL grOpenSharedMemory( // Opens another device's shared memory allocation on this device
+ GR_DEVICE device,
+ const GR_MEMORY_OPEN_INFO* pOpenInfo,
+ GR_GPU_MEMORY* pMem);
+
+GR_RESULT GR_STDCALL grOpenSharedQueueSemaphore(
+ GR_DEVICE device,
+ const GR_QUEUE_SEMAPHORE_OPEN_INFO* pOpenInfo,
+ GR_QUEUE_SEMAPHORE* pSemaphore);
+
+GR_RESULT GR_STDCALL grOpenPeerMemory(
+ GR_DEVICE device,
+ const GR_PEER_MEMORY_OPEN_INFO* pOpenInfo,
+ GR_GPU_MEMORY* pMem);
+
+GR_RESULT GR_STDCALL grOpenPeerImage( // Opens a peer GPU's image; yields both image and backing memory handles
+ GR_DEVICE device,
+ const GR_PEER_IMAGE_OPEN_INFO* pOpenInfo,
+ GR_IMAGE* pImage,
+ GR_GPU_MEMORY* pMem);
+
+// Generic API object functions
+
+GR_RESULT GR_STDCALL grDestroyObject( // Destroys any GR_OBJECT-derived handle
+ GR_OBJECT object);
+
+GR_RESULT GR_STDCALL grGetObjectInfo( // Generic query on any base object; pData layout depends on infoType
+ GR_BASE_OBJECT object,
+ GR_ENUM infoType, // GR_INFO_TYPE
+ GR_SIZE* pDataSize,
+ GR_VOID* pData);
+
+GR_RESULT GR_STDCALL grBindObjectMemory( // Binds backing memory to an object at the given byte offset
+ GR_OBJECT object,
+ GR_GPU_MEMORY mem,
+ GR_GPU_SIZE offset);
+
+// Fence functions
+
+GR_RESULT GR_STDCALL grCreateFence(
+ GR_DEVICE device,
+ const GR_FENCE_CREATE_INFO* pCreateInfo,
+ GR_FENCE* pFence);
+
+GR_RESULT GR_STDCALL grGetFenceStatus( // Non-blocking status poll; result code conveys the state
+ GR_FENCE fence);
+
+GR_RESULT GR_STDCALL grWaitForFences( // Blocks on any/all of the fences
+ GR_DEVICE device,
+ GR_UINT fenceCount,
+ const GR_FENCE* pFences,
+ GR_BOOL waitAll, // TRUE: wait for all; FALSE: wait for any
+ GR_FLOAT timeout); // float-valued timeout; units (presumably seconds) not visible here — confirm with spec
+
+// Queue semaphore functions
+
+GR_RESULT GR_STDCALL grCreateQueueSemaphore(
+ GR_DEVICE device,
+ const GR_QUEUE_SEMAPHORE_CREATE_INFO* pCreateInfo,
+ GR_QUEUE_SEMAPHORE* pSemaphore);
+
+GR_RESULT GR_STDCALL grSignalQueueSemaphore( // Queues a semaphore signal operation on the queue
+ GR_QUEUE queue,
+ GR_QUEUE_SEMAPHORE semaphore);
+
+GR_RESULT GR_STDCALL grWaitQueueSemaphore( // Queues a semaphore wait operation on the queue
+ GR_QUEUE queue,
+ GR_QUEUE_SEMAPHORE semaphore);
+
+// Event functions
+
+GR_RESULT GR_STDCALL grCreateEvent(
+ GR_DEVICE device,
+ const GR_EVENT_CREATE_INFO* pCreateInfo,
+ GR_EVENT* pEvent);
+
+GR_RESULT GR_STDCALL grGetEventStatus( // Non-blocking status poll; result code conveys the state
+ GR_EVENT event);
+
+GR_RESULT GR_STDCALL grSetEvent( // CPU-side set; GPU-side equivalent is grCmdSetEvent
+ GR_EVENT event);
+
+GR_RESULT GR_STDCALL grResetEvent( // CPU-side reset; GPU-side equivalent is grCmdResetEvent
+ GR_EVENT event);
+
+// Query functions
+
+GR_RESULT GR_STDCALL grCreateQueryPool(
+ GR_DEVICE device,
+ const GR_QUERY_POOL_CREATE_INFO* pCreateInfo,
+ GR_QUERY_POOL* pQueryPool);
+
+GR_RESULT GR_STDCALL grGetQueryPoolResults( // Reads back results for a slot range; pData layout depends on query type
+ GR_QUERY_POOL queryPool,
+ GR_UINT startQuery,
+ GR_UINT queryCount,
+ GR_SIZE* pDataSize,
+ GR_VOID* pData);
+
+// Format capabilities
+
+GR_RESULT GR_STDCALL grGetFormatInfo( // Typical result layout: GR_FORMAT_PROPERTIES (by infoType)
+ GR_DEVICE device,
+ GR_FORMAT format,
+ GR_ENUM infoType, // GR_INFO_TYPE
+ GR_SIZE* pDataSize,
+ GR_VOID* pData);
+
+// Image functions
+
+GR_RESULT GR_STDCALL grCreateImage( // Creates an image object; memory is bound separately via grBindObjectMemory
+ GR_DEVICE device,
+ const GR_IMAGE_CREATE_INFO* pCreateInfo,
+ GR_IMAGE* pImage);
+
+GR_RESULT GR_STDCALL grGetImageSubresourceInfo( // Typical result layout: GR_SUBRESOURCE_LAYOUT (by infoType)
+ GR_IMAGE image,
+ const GR_IMAGE_SUBRESOURCE* pSubresource,
+ GR_ENUM infoType, // GR_INFO_TYPE
+ GR_SIZE* pDataSize,
+ GR_VOID* pData);
+
+// Image view functions
+
+GR_RESULT GR_STDCALL grCreateImageView( // Shader-readable view over an image
+ GR_DEVICE device,
+ const GR_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
+ GR_IMAGE_VIEW* pView);
+
+GR_RESULT GR_STDCALL grCreateColorTargetView( // Render-target view over an image
+ GR_DEVICE device,
+ const GR_COLOR_TARGET_VIEW_CREATE_INFO* pCreateInfo,
+ GR_COLOR_TARGET_VIEW* pView);
+
+GR_RESULT GR_STDCALL grCreateDepthStencilView( // Depth/stencil view over an image
+ GR_DEVICE device,
+ const GR_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
+ GR_DEPTH_STENCIL_VIEW* pView);
+
+// Shader functions
+
+GR_RESULT GR_STDCALL grCreateShader( // Creates a shader from opaque IL code (see GR_SHADER_CREATE_INFO)
+ GR_DEVICE device,
+ const GR_SHADER_CREATE_INFO* pCreateInfo,
+ GR_SHADER* pShader);
+
+// Pipeline functions
+
+GR_RESULT GR_STDCALL grCreateGraphicsPipeline(
+ GR_DEVICE device,
+ const GR_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
+ GR_PIPELINE* pPipeline);
+
+GR_RESULT GR_STDCALL grCreateComputePipeline(
+ GR_DEVICE device,
+ const GR_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo,
+ GR_PIPELINE* pPipeline);
+
+GR_RESULT GR_STDCALL grStorePipeline( // Serializes a pipeline; pair with grLoadPipeline
+ GR_PIPELINE pipeline,
+ GR_SIZE* pDataSize, // in/out size pattern, as with the grGet*Info queries
+ GR_VOID* pData);
+
+GR_RESULT GR_STDCALL grLoadPipeline( // Recreates a pipeline from grStorePipeline output
+ GR_DEVICE device,
+ GR_SIZE dataSize,
+ const GR_VOID* pData,
+ GR_PIPELINE* pPipeline);
+
+// Sampler functions
+
+GR_RESULT GR_STDCALL grCreateSampler(
+ GR_DEVICE device,
+ const GR_SAMPLER_CREATE_INFO* pCreateInfo,
+ GR_SAMPLER* pSampler);
+
+// Descriptor set functions
+
+GR_RESULT GR_STDCALL grCreateDescriptorSet(
+ GR_DEVICE device,
+ const GR_DESCRIPTOR_SET_CREATE_INFO* pCreateInfo,
+ GR_DESCRIPTOR_SET* pDescriptorSet);
+
+GR_VOID GR_STDCALL grBeginDescriptorSetUpdate( // Updates must occur between Begin/End; note GR_VOID return (no error reporting)
+ GR_DESCRIPTOR_SET descriptorSet);
+
+GR_VOID GR_STDCALL grEndDescriptorSetUpdate(
+ GR_DESCRIPTOR_SET descriptorSet);
+
+GR_VOID GR_STDCALL grAttachSamplerDescriptors( // Writes slotCount sampler descriptors starting at startSlot
+ GR_DESCRIPTOR_SET descriptorSet,
+ GR_UINT startSlot,
+ GR_UINT slotCount,
+ const GR_SAMPLER* pSamplers);
+
+GR_VOID GR_STDCALL grAttachImageViewDescriptors( // Writes slotCount image-view descriptors starting at startSlot
+ GR_DESCRIPTOR_SET descriptorSet,
+ GR_UINT startSlot,
+ GR_UINT slotCount,
+ const GR_IMAGE_VIEW_ATTACH_INFO* pImageViews);
+
+GR_VOID GR_STDCALL grAttachMemoryViewDescriptors( // Writes slotCount memory-view descriptors starting at startSlot
+ GR_DESCRIPTOR_SET descriptorSet,
+ GR_UINT startSlot,
+ GR_UINT slotCount,
+ const GR_MEMORY_VIEW_ATTACH_INFO* pMemViews);
+
+GR_VOID GR_STDCALL grAttachNestedDescriptors( // Writes slotCount nested-set references starting at startSlot
+ GR_DESCRIPTOR_SET descriptorSet,
+ GR_UINT startSlot,
+ GR_UINT slotCount,
+ const GR_DESCRIPTOR_SET_ATTACH_INFO* pNestedDescriptorSets);
+
+GR_VOID GR_STDCALL grClearDescriptorSetSlots( // Clears slotCount descriptors starting at startSlot
+ GR_DESCRIPTOR_SET descriptorSet,
+ GR_UINT startSlot,
+ GR_UINT slotCount);
+
+// State object functions
+
+GR_RESULT GR_STDCALL grCreateViewportState( // State objects are bound via grCmdBindStateObject
+ GR_DEVICE device,
+ const GR_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
+ GR_VIEWPORT_STATE_OBJECT* pState);
+
+GR_RESULT GR_STDCALL grCreateRasterState(
+ GR_DEVICE device,
+ const GR_RASTER_STATE_CREATE_INFO* pCreateInfo,
+ GR_RASTER_STATE_OBJECT* pState);
+
+GR_RESULT GR_STDCALL grCreateMsaaState(
+ GR_DEVICE device,
+ const GR_MSAA_STATE_CREATE_INFO* pCreateInfo,
+ GR_MSAA_STATE_OBJECT* pState);
+
+GR_RESULT GR_STDCALL grCreateColorBlendState(
+ GR_DEVICE device,
+ const GR_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
+ GR_COLOR_BLEND_STATE_OBJECT* pState);
+
+GR_RESULT GR_STDCALL grCreateDepthStencilState(
+ GR_DEVICE device,
+ const GR_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
+ GR_DEPTH_STENCIL_STATE_OBJECT* pState);
+// Command buffer functions
+
+GR_RESULT GR_STDCALL grCreateCommandBuffer(
+ GR_DEVICE device,
+ const GR_CMD_BUFFER_CREATE_INFO* pCreateInfo,
+ GR_CMD_BUFFER* pCmdBuffer);
+
+GR_RESULT GR_STDCALL grBeginCommandBuffer( // Starts recording; grCmd* calls are valid between Begin and End
+ GR_CMD_BUFFER cmdBuffer,
+ GR_FLAGS flags); // GR_CMD_BUFFER_BUILD_FLAGS
+
+GR_RESULT GR_STDCALL grEndCommandBuffer( // Finishes recording; buffer may then be passed to grQueueSubmit
+ GR_CMD_BUFFER cmdBuffer);
+
+GR_RESULT GR_STDCALL grResetCommandBuffer( // Discards recorded contents for reuse
+ GR_CMD_BUFFER cmdBuffer);
+
+// Command buffer building functions
+// NOTE: all grCmd* entries return GR_VOID — errors surface later (e.g. at End/Submit), not per call.
+
+GR_VOID GR_STDCALL grCmdBindPipeline(
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM pipelineBindPoint, // GR_PIPELINE_BIND_POINT
+ GR_PIPELINE pipeline);
+
+GR_VOID GR_STDCALL grCmdBindStateObject( // Binds one dynamic state object (viewport/raster/MSAA/blend/depth-stencil)
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM stateBindPoint, // GR_STATE_BIND_POINT
+ GR_STATE_OBJECT state);
+
+GR_VOID GR_STDCALL grCmdBindDescriptorSet( // Binds a descriptor set at the given index, offset by slotOffset
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM pipelineBindPoint, // GR_PIPELINE_BIND_POINT
+ GR_UINT index,
+ GR_DESCRIPTOR_SET descriptorSet,
+ GR_UINT slotOffset);
+
+GR_VOID GR_STDCALL grCmdBindDynamicMemoryView(
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM pipelineBindPoint, // GR_PIPELINE_BIND_POINT
+ const GR_MEMORY_VIEW_ATTACH_INFO* pMemView);
+
+GR_VOID GR_STDCALL grCmdBindIndexData( // Sets the index buffer for subsequent indexed draws
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY mem,
+ GR_GPU_SIZE offset,
+ GR_ENUM indexType); // GR_INDEX_TYPE
+
+GR_VOID GR_STDCALL grCmdBindTargets( // Binds color targets and the (optional) depth/stencil target
+ GR_CMD_BUFFER cmdBuffer,
+ GR_UINT colorTargetCount,
+ const GR_COLOR_TARGET_BIND_INFO* pColorTargets,
+ const GR_DEPTH_STENCIL_BIND_INFO* pDepthTarget);
+
+GR_VOID GR_STDCALL grCmdPrepareMemoryRegions( // Records memory state transitions (see GR_MEMORY_STATE_TRANSITION)
+ GR_CMD_BUFFER cmdBuffer,
+ GR_UINT transitionCount,
+ const GR_MEMORY_STATE_TRANSITION* pStateTransitions);
+
+GR_VOID GR_STDCALL grCmdPrepareImages( // Records image state transitions (see GR_IMAGE_STATE_TRANSITION)
+ GR_CMD_BUFFER cmdBuffer,
+ GR_UINT transitionCount,
+ const GR_IMAGE_STATE_TRANSITION* pStateTransitions);
+
+GR_VOID GR_STDCALL grCmdDraw( // Non-indexed draw; note first/count parameter ordering
+ GR_CMD_BUFFER cmdBuffer,
+ GR_UINT firstVertex,
+ GR_UINT vertexCount,
+ GR_UINT firstInstance,
+ GR_UINT instanceCount);
+
+GR_VOID GR_STDCALL grCmdDrawIndexed( // Indexed draw; vertexOffset is signed
+ GR_CMD_BUFFER cmdBuffer,
+ GR_UINT firstIndex,
+ GR_UINT indexCount,
+ GR_INT vertexOffset,
+ GR_UINT firstInstance,
+ GR_UINT instanceCount);
+
+GR_VOID GR_STDCALL grCmdDrawIndirect( // Reads GR_DRAW_INDIRECT_ARG from mem+offset
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY mem,
+ GR_GPU_SIZE offset);
+
+GR_VOID GR_STDCALL grCmdDrawIndexedIndirect( // Reads GR_DRAW_INDEXED_INDIRECT_ARG from mem+offset
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY mem,
+ GR_GPU_SIZE offset);
+
+GR_VOID GR_STDCALL grCmdDispatch( // Compute dispatch; x/y/z are presumably thread-group counts — confirm with spec
+ GR_CMD_BUFFER cmdBuffer,
+ GR_UINT x,
+ GR_UINT y,
+ GR_UINT z);
+
+GR_VOID GR_STDCALL grCmdDispatchIndirect( // Reads GR_DISPATCH_INDIRECT_ARG from mem+offset
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY mem,
+ GR_GPU_SIZE offset);
+
+GR_VOID GR_STDCALL grCmdCopyMemory( // Copies regionCount byte ranges between memory objects
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY srcMem,
+ GR_GPU_MEMORY destMem,
+ GR_UINT regionCount,
+ const GR_MEMORY_COPY* pRegions);
+
+GR_VOID GR_STDCALL grCmdCopyImage( // Copies regionCount subresource regions between images
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE srcImage,
+ GR_IMAGE destImage,
+ GR_UINT regionCount,
+ const GR_IMAGE_COPY* pRegions);
+
+GR_VOID GR_STDCALL grCmdCopyMemoryToImage(
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY srcMem,
+ GR_IMAGE destImage,
+ GR_UINT regionCount,
+ const GR_MEMORY_IMAGE_COPY* pRegions);
+
+GR_VOID GR_STDCALL grCmdCopyImageToMemory(
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE srcImage,
+ GR_GPU_MEMORY destMem,
+ GR_UINT regionCount,
+ const GR_MEMORY_IMAGE_COPY* pRegions);
+
+GR_VOID GR_STDCALL grCmdCloneImageData( // Whole-image clone; both images' states are supplied explicitly
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE srcImage,
+ GR_ENUM srcImageState, // GR_IMAGE_STATE
+ GR_IMAGE destImage,
+ GR_ENUM destImageState); // GR_IMAGE_STATE
+
+GR_VOID GR_STDCALL grCmdUpdateMemory( // Inline CPU data -> GPU memory; size limited by maxInlineMemoryUpdateSize
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY destMem,
+ GR_GPU_SIZE destOffset,
+ GR_GPU_SIZE dataSize,
+ const GR_UINT32* pData); // data is consumed as 32-bit words
+
+GR_VOID GR_STDCALL grCmdFillMemory( // Fills fillSize bytes with a repeated 32-bit value
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY destMem,
+ GR_GPU_SIZE destOffset,
+ GR_GPU_SIZE fillSize,
+ GR_UINT32 data);
+
+GR_VOID GR_STDCALL grCmdClearColorImage( // Clears subresource ranges to a float RGBA color
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE image,
+ const GR_FLOAT color[4],
+ GR_UINT rangeCount,
+ const GR_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+GR_VOID GR_STDCALL grCmdClearColorImageRaw( // Clears subresource ranges to raw (unconverted) 32-bit channel values
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE image,
+ const GR_UINT32 color[4],
+ GR_UINT rangeCount,
+ const GR_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+GR_VOID GR_STDCALL grCmdClearDepthStencil( // Clears depth and stencil aspects of the given ranges
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE image,
+ GR_FLOAT depth,
+ GR_UINT8 stencil,
+ GR_UINT rangeCount,
+ const GR_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+GR_VOID GR_STDCALL grCmdResolveImage( // Resolves MSAA source rects into the destination image
+ GR_CMD_BUFFER cmdBuffer,
+ GR_IMAGE srcImage,
+ GR_IMAGE destImage,
+ GR_UINT rectCount,
+ const GR_IMAGE_RESOLVE* pRects);
+
+GR_VOID GR_STDCALL grCmdSetEvent( // GPU-side event set (CPU-side counterpart: grSetEvent)
+ GR_CMD_BUFFER cmdBuffer,
+ GR_EVENT event);
+
+GR_VOID GR_STDCALL grCmdResetEvent( // GPU-side event reset (CPU-side counterpart: grResetEvent)
+ GR_CMD_BUFFER cmdBuffer,
+ GR_EVENT event);
+
+GR_VOID GR_STDCALL grCmdMemoryAtomic( // 64-bit atomic on GPU memory at destMem+destOffset
+ GR_CMD_BUFFER cmdBuffer,
+ GR_GPU_MEMORY destMem,
+ GR_GPU_SIZE destOffset,
+ GR_UINT64 srcData,
+ GR_ENUM atomicOp); // GR_ATOMIC_OP
+
+GR_VOID GR_STDCALL grCmdBeginQuery( // Opens query slot in queryPool; close with grCmdEndQuery
+ GR_CMD_BUFFER cmdBuffer,
+ GR_QUERY_POOL queryPool,
+ GR_UINT slot,
+ GR_FLAGS flags); // GR_QUERY_CONTROL_FLAGS
+
+GR_VOID GR_STDCALL grCmdEndQuery(
+ GR_CMD_BUFFER cmdBuffer,
+ GR_QUERY_POOL queryPool,
+ GR_UINT slot);
+
+GR_VOID GR_STDCALL grCmdResetQueryPool( // Resets a contiguous slot range before reuse
+ GR_CMD_BUFFER cmdBuffer,
+ GR_QUERY_POOL queryPool,
+ GR_UINT startQuery,
+ GR_UINT queryCount);
+
+GR_VOID GR_STDCALL grCmdWriteTimestamp( // Writes a GPU timestamp into destMem+destOffset
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM timestampType, // GR_TIMESTAMP_TYPE
+ GR_GPU_MEMORY destMem,
+ GR_GPU_SIZE destOffset);
+
+GR_VOID GR_STDCALL grCmdInitAtomicCounters( // Initializes counterCount counters from CPU-provided values
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM pipelineBindPoint, // GR_PIPELINE_BIND_POINT
+ GR_UINT startCounter,
+ GR_UINT counterCount,
+ const GR_UINT32* pData);
+
+GR_VOID GR_STDCALL grCmdLoadAtomicCounters( // Loads counter values from GPU memory
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM pipelineBindPoint, // GR_PIPELINE_BIND_POINT
+ GR_UINT startCounter,
+ GR_UINT counterCount,
+ GR_GPU_MEMORY srcMem,
+ GR_GPU_SIZE srcOffset);
+
+GR_VOID GR_STDCALL grCmdSaveAtomicCounters( // Stores counter values to GPU memory
+ GR_CMD_BUFFER cmdBuffer,
+ GR_ENUM pipelineBindPoint, // GR_PIPELINE_BIND_POINT
+ GR_UINT startCounter,
+ GR_UINT counterCount,
+ GR_GPU_MEMORY destMem,
+ GR_GPU_SIZE destOffset);
+
+#ifdef __cplusplus
+}; // extern "C"
+#endif // __cplusplus
+
+#endif // __MANTLE_H__
diff --git a/include/mantleDbg.h b/include/mantleDbg.h
new file mode 100644
index 0000000..d51a3f6
--- /dev/null
+++ b/include/mantleDbg.h
@@ -0,0 +1,623 @@
+//
+// File: mantleDbg.h
+//
+// Copyright 2014 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+//
+// AMD is granting you permission to use this software for reference
+// purposes only and not for use in any software product.
+//
+// You agree that you will not reverse engineer or decompile the Materials,
+// in whole or in part, except as allowed by applicable law.
+//
+// WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY,
+// INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE
+// WILL RUN UNINTERRUPTED OR ERROR-FREE OR WARRANTIES ARISING FROM CUSTOM OF
+// TRADE OR COURSE OF USAGE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF THE
+// SOFTWARE IS ASSUMED BY YOU.
+// Some jurisdictions do not allow the exclusion of implied warranties, so
+// the above exclusion may not apply to You.
+//
+// LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL
+// NOT, UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT,
+// INCIDENTAL, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF
+// THE SOFTWARE OR THIS AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+// In no event shall AMD's total liability to You for all damages, losses,
+// and causes of action (whether in contract, tort (including negligence) or
+// otherwise) exceed the amount of $100 USD. You agree to defend, indemnify
+// and hold harmless AMD and its licensors, and any of their directors,
+// officers, employees, affiliates or agents from and against any and all
+// loss, damage, liability and other expenses (including reasonable attorneys'
+// fees), resulting from Your use of the Software or violation of the terms and
+// conditions of this Agreement.
+//
+// U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
+// RIGHTS." Use, duplication, or disclosure by the Government is subject to the
+// restrictions as set forth in FAR 52.227-14 and DFAR252.227-7013, et seq., or
+// its successor. Use of the Materials by the Government constitutes
+// acknowledgement of AMD's proprietary rights in them.
+//
+// EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as
+// stated in the Software License Agreement.
+//
+
+#ifndef __MANTLEDBG_H__
+#define __MANTLEDBG_H__
+
+#include "mantle.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+* Mantle debug and validation features
+***************************************************************************************************
+*/
+
+typedef enum _GR_DBG_MSG_TYPE
+{
+ GR_DBG_MSG_UNKNOWN = 0x00020000,
+ GR_DBG_MSG_ERROR = 0x00020001,
+ GR_DBG_MSG_WARNING = 0x00020002,
+ GR_DBG_MSG_PERF_WARNING = 0x00020003,
+
+ GR_DBG_MSG_TYPE_BEGIN_RANGE = GR_DBG_MSG_UNKNOWN,
+ GR_DBG_MSG_TYPE_END_RANGE = GR_DBG_MSG_PERF_WARNING,
+ GR_NUM_DBG_MSG_TYPE = (GR_DBG_MSG_TYPE_END_RANGE - GR_DBG_MSG_TYPE_BEGIN_RANGE + 1),
+} GR_DBG_MSG_TYPE;
+
+typedef enum _GR_DBG_GLOBAL_OPTION
+{
+ GR_DBG_OPTION_DEBUG_ECHO_ENABLE = 0x00020100,
+ GR_DBG_OPTION_BREAK_ON_ERROR = 0x00020101,
+ GR_DBG_OPTION_BREAK_ON_WARNING = 0x00020102,
+
+ GR_DBG_GLOBAL_OPTION_BEGIN_RANGE = GR_DBG_OPTION_DEBUG_ECHO_ENABLE,
+ GR_DBG_GLOBAL_OPTION_END_RANGE = GR_DBG_OPTION_BREAK_ON_WARNING,
+ GR_NUM_DBG_GLOBAL_OPTION = (GR_DBG_GLOBAL_OPTION_END_RANGE - GR_DBG_GLOBAL_OPTION_BEGIN_RANGE + 1),
+} GR_DBG_GLOBAL_OPTION;
+
+typedef enum _GR_DBG_DEVICE_OPTION
+{
+ GR_DBG_OPTION_DISABLE_PIPELINE_LOADS = 0x00020400,
+ GR_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS = 0x00020401,
+ GR_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT = 0x00020402,
+
+ GR_DBG_DEVICE_OPTION_BEGIN_RANGE = GR_DBG_OPTION_DISABLE_PIPELINE_LOADS,
+ GR_DBG_DEVICE_OPTION_END_RANGE = GR_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT,
+ GR_NUM_DBG_DEVICE_OPTION = (GR_DBG_DEVICE_OPTION_END_RANGE - GR_DBG_DEVICE_OPTION_BEGIN_RANGE + 1),
+} GR_DBG_DEVICE_OPTION;
+
+typedef enum _GR_DBG_MSG_FILTER
+{
+ GR_DBG_MSG_FILTER_NONE = 0x00020800,
+ GR_DBG_MSG_FILTER_REPEATED = 0x00020801,
+ GR_DBG_MSG_FILTER_ALL = 0x00020802,
+
+ GR_DBG_MSG_FILTER_BEGIN_RANGE = GR_DBG_MSG_FILTER_NONE,
+ GR_DBG_MSG_FILTER_END_RANGE = GR_DBG_MSG_FILTER_ALL,
+ GR_NUM_DBG_MSG_FILTER = (GR_DBG_MSG_FILTER_END_RANGE - GR_DBG_MSG_FILTER_BEGIN_RANGE + 1),
+} GR_DBG_MSG_FILTER;
+
+typedef enum _GR_DBG_DATA_TYPE
+{
+ // Common object debug data
+ GR_DBG_DATA_OBJECT_TYPE = 0x00020a00,
+ GR_DBG_DATA_OBJECT_CREATE_INFO = 0x00020a01,
+ GR_DBG_DATA_OBJECT_TAG = 0x00020a02,
+ // Command buffer specific debug data
+ GR_DBG_DATA_CMD_BUFFER_API_TRACE = 0x00020b00,
+ // Memory object specific debug data
+ GR_DBG_DATA_MEMORY_OBJECT_LAYOUT = 0x00020c00,
+ GR_DBG_DATA_MEMORY_OBJECT_STATE = 0x00020c01,
+} GR_DBG_DATA_TYPE;
+
+typedef enum _GR_DBG_OBJECT_TYPE
+{
+ GR_DBG_OBJECT_UNKNOWN = 0x00020900,
+ GR_DBG_OBJECT_DEVICE = 0x00020901,
+ GR_DBG_OBJECT_QUEUE = 0x00020902,
+ GR_DBG_OBJECT_GPU_MEMORY = 0x00020903,
+ GR_DBG_OBJECT_IMAGE = 0x00020904,
+ GR_DBG_OBJECT_IMAGE_VIEW = 0x00020905,
+ GR_DBG_OBJECT_COLOR_TARGET_VIEW = 0x00020906,
+ GR_DBG_OBJECT_DEPTH_STENCIL_VIEW = 0x00020907,
+ GR_DBG_OBJECT_SHADER = 0x00020908,
+ GR_DBG_OBJECT_GRAPHICS_PIPELINE = 0x00020909,
+ GR_DBG_OBJECT_COMPUTE_PIPELINE = 0x0002090a,
+ GR_DBG_OBJECT_SAMPLER = 0x0002090b,
+ GR_DBG_OBJECT_DESCRIPTOR_SET = 0x0002090c,
+ GR_DBG_OBJECT_VIEWPORT_STATE = 0x0002090d,
+ GR_DBG_OBJECT_RASTER_STATE = 0x0002090e,
+ GR_DBG_OBJECT_MSAA_STATE = 0x0002090f,
+ GR_DBG_OBJECT_COLOR_BLEND_STATE = 0x00020910,
+ GR_DBG_OBJECT_DEPTH_STENCIL_STATE = 0x00020911,
+ GR_DBG_OBJECT_CMD_BUFFER = 0x00020912,
+ GR_DBG_OBJECT_FENCE = 0x00020913,
+ GR_DBG_OBJECT_QUEUE_SEMAPHORE = 0x00020914,
+ GR_DBG_OBJECT_EVENT = 0x00020915,
+ GR_DBG_OBJECT_QUERY_POOL = 0x00020916,
+ GR_DBG_OBJECT_SHARED_GPU_MEMORY = 0x00020917,
+ GR_DBG_OBJECT_SHARED_QUEUE_SEMAPHORE = 0x00020918,
+ GR_DBG_OBJECT_PEER_GPU_MEMORY = 0x00020919,
+ GR_DBG_OBJECT_PEER_IMAGE = 0x0002091a,
+ GR_DBG_OBJECT_PINNED_GPU_MEMORY = 0x0002091b,
+ GR_DBG_OBJECT_INTERNAL_GPU_MEMORY = 0x0002091c,
+
+ GR_DBG_OBJECT_TYPE_BEGIN_RANGE = GR_DBG_OBJECT_UNKNOWN,
+ GR_DBG_OBJECT_TYPE_END_RANGE = GR_DBG_OBJECT_INTERNAL_GPU_MEMORY,
+ GR_NUM_DBG_OBJECT_TYPE = (GR_DBG_OBJECT_TYPE_END_RANGE - GR_DBG_OBJECT_TYPE_BEGIN_RANGE + 1),
+} GR_DBG_OBJECT_TYPE;
+
+// ------------------------------------------------------------------------------------------------
+// Memory object layout reflection
+
+typedef struct _GR_DBG_MEMORY_OBJECT_LAYOUT_REGION
+{
+ GR_GPU_SIZE offset;
+ GR_GPU_SIZE regionSize;
+ GR_OBJECT boundObject;
+} GR_DBG_MEMORY_OBJECT_LAYOUT_REGION;
+
+typedef struct _GR_DBG_MEMORY_OBJECT_LAYOUT
+{
+ GR_UINT regionCount;
+ GR_DBG_MEMORY_OBJECT_LAYOUT_REGION regions[1];
+ // (regionCount-1) more GR_DBG_MEMORY_OBJECT_LAYOUT_REGION structures to follow...
+} GR_DBG_MEMORY_OBJECT_LAYOUT;
+
+typedef struct _GR_DBG_MEMORY_OBJECT_STATE_REGION
+{
+ GR_GPU_SIZE offset;
+ GR_GPU_SIZE regionSize;
+ GR_ENUM state; // GR_MEMORY_STATE
+} GR_DBG_MEMORY_OBJECT_STATE_REGION;
+
+typedef struct _GR_DBG_MEMORY_OBJECT_STATE
+{
+ GR_UINT regionCount;
+ GR_DBG_MEMORY_OBJECT_STATE_REGION regions[1];
+ // (regionCount-1) more GR_DBG_MEMORY_OBJECT_STATE_REGION structures to follow...
+} GR_DBG_MEMORY_OBJECT_STATE;
+
+// ------------------------------------------------------------------------------------------------
+// Command buffer packet reflection
+
+// Command buffer packet opcodes
+typedef enum _GR_DBG_CMD_BUFFER_OPCODE
+{
+ GR_DBG_OP_CMD_BUFFER_END = 0x00028000,
+ GR_DBG_OP_CMD_BIND_PIPELINE = 0x00028001,
+ GR_DBG_OP_CMD_BIND_STATE_OBJECT = 0x00028002,
+ GR_DBG_OP_CMD_BIND_DESCRIPTOR_SET = 0x00028003,
+ GR_DBG_OP_CMD_BIND_DYNAMIC_MEMORY_VIEW = 0x00028004,
+ GR_DBG_OP_CMD_BIND_INDEX_DATA = 0x00028005,
+ GR_DBG_OP_CMD_BIND_TARGETS = 0x00028006,
+ GR_DBG_OP_CMD_PREPARE_MEMORY_REGIONS = 0x00028007,
+ GR_DBG_OP_CMD_PREPARE_IMAGES = 0x00028008,
+ GR_DBG_OP_CMD_DRAW = 0x00028009,
+ GR_DBG_OP_CMD_DRAW_INDEXED = 0x0002800a,
+ GR_DBG_OP_CMD_DRAW_INDIRECT = 0x0002800b,
+ GR_DBG_OP_CMD_DRAW_INDEXED_INDIRECT = 0x0002800c,
+ GR_DBG_OP_CMD_DISPATCH = 0x0002800d,
+ GR_DBG_OP_CMD_DISPATCH_INDIRECT = 0x0002800e,
+ GR_DBG_OP_CMD_COPY_MEMORY = 0x0002800f,
+ GR_DBG_OP_CMD_COPY_IMAGE = 0x00028010,
+ GR_DBG_OP_CMD_COPY_MEMORY_TO_IMAGE = 0x00028011,
+ GR_DBG_OP_CMD_COPY_IMAGE_TO_MEMORY = 0x00028012,
+ GR_DBG_OP_CMD_CLONE_IMAGE_DATA = 0x00028013,
+ GR_DBG_OP_CMD_FILL_MEMORY = 0x00028014,
+ GR_DBG_OP_CMD_UPDATE_MEMORY = 0x00028015,
+ GR_DBG_OP_CMD_CLEAR_COLOR_IMAGE = 0x00028016,
+ GR_DBG_OP_CMD_CLEAR_COLOR_IMAGE_RAW = 0x00028017,
+ GR_DBG_OP_CMD_CLEAR_DEPTH_STENCIL = 0x00028018,
+ GR_DBG_OP_CMD_RESOLVE_IMAGE = 0x00028019,
+ GR_DBG_OP_CMD_SET_EVENT = 0x0002801a,
+ GR_DBG_OP_CMD_RESET_EVENT = 0x0002801b,
+ GR_DBG_OP_CMD_MEMORY_ATOMIC = 0x0002801c,
+ GR_DBG_OP_CMD_BEGIN_QUERY = 0x0002801d,
+ GR_DBG_OP_CMD_END_QUERY = 0x0002801e,
+ GR_DBG_OP_CMD_RESET_QUERY_POOL = 0x0002801f,
+ GR_DBG_OP_CMD_WRITE_TIMESTAMP = 0x00028020,
+ GR_DBG_OP_CMD_INIT_ATOMIC_COUNTERS = 0x00028021,
+ GR_DBG_OP_CMD_LOAD_ATOMIC_COUNTERS = 0x00028022,
+ GR_DBG_OP_CMD_SAVE_ATOMIC_COUNTERS = 0x00028023,
+ GR_DBG_OP_CMD_DBG_MARKER_BEGIN = 0x00028024,
+ GR_DBG_OP_CMD_DBG_MARKER_END = 0x00028025,
+
+ GR_DBG_CMD_BUFFER_OPCODE_BEGIN_RANGE = GR_DBG_OP_CMD_BUFFER_END,
+ GR_DBG_CMD_BUFFER_OPCODE_END_RANGE = GR_DBG_OP_CMD_DBG_MARKER_END,
+ GR_NUM_DBG_CMD_BUFFER_OPCODE = (GR_DBG_CMD_BUFFER_OPCODE_END_RANGE - GR_DBG_CMD_BUFFER_OPCODE_BEGIN_RANGE + 1),
+} GR_DBG_CMD_BUFFER_OPCODE;
+
+// Command buffer packet header
+typedef struct _GR_DBG_OP_HEADER
+{
+ GR_UINT recordSize;
+ GR_ENUM opCode; // GR_DBG_CMD_BUFFER_OPCODE
+ // Op-specific variable size data follows...
+} GR_DBG_OP_HEADER;
+
+// Memory binding reference for image objects at the time of command buffer recording
+typedef struct _GR_DBG_BOUND_MEMORY_REF
+{
+ GR_GPU_MEMORY mem;
+ GR_GPU_SIZE offset;
+} GR_DBG_BOUND_MEMORY_REF;
+
+typedef struct _GR_DBG_OBJECT_REF
+{
+ GR_OBJECT object;
+ GR_DBG_BOUND_MEMORY_REF memoryRef;
+} GR_DBG_OBJECT_REF;
+
+typedef struct _GR_DBG_VIEW_REF
+{
+ GR_DBG_OBJECT_REF view;
+ // Memory reference for image of the color target view
+ GR_DBG_BOUND_MEMORY_REF imageMemoryRef;
+} GR_DBG_VIEW_REF;
+
+// Command buffer payload packets
+typedef struct _GR_DBG_OP_DATA_CMD_BIND_PIPELINE
+{
+ GR_ENUM pipelineBindPoint; // GR_PIPELINE_BIND_POINT
+ GR_DBG_OBJECT_REF pipeline;
+} GR_DBG_OP_DATA_CMD_BIND_PIPELINE;
+
+typedef struct _GR_DBG_OP_DATA_CMD_BIND_STATE_OBJECT
+{
+ GR_ENUM stateBindPoint; // GR_STATE_BIND_POINT
+ GR_DBG_OBJECT_REF state;
+} GR_DBG_OP_DATA_CMD_BIND_STATE_OBJECT;
+
+typedef struct _GR_DBG_OP_DATA_CMD_BIND_DESCRIPTOR_SET
+{
+ GR_ENUM pipelineBindPoint; // GR_PIPELINE_BIND_POINT
+ GR_UINT index;
+ GR_DBG_OBJECT_REF descriptorSet;
+ GR_UINT slotOffset;
+} GR_DBG_OP_DATA_CMD_BIND_DESCRIPTOR_SET;
+
+typedef struct _GR_DBG_OP_DATA_CMD_BIND_DYNAMIC_MEMORY_VIEW
+{
+ GR_ENUM pipelineBindPoint; // GR_PIPELINE_BIND_POINT
+ GR_MEMORY_VIEW_ATTACH_INFO view;
+} GR_DBG_OP_DATA_CMD_BIND_DYNAMIC_MEMORY_VIEW;
+
+typedef struct _GR_DBG_OP_DATA_CMD_BIND_INDEX_DATA
+{
+ GR_GPU_MEMORY mem;
+ GR_GPU_SIZE offset;
+ GR_ENUM indexType; // GR_INDEX_TYPE
+} GR_DBG_OP_DATA_CMD_BIND_INDEX_DATA;
+
+typedef struct _GR_DBG_COLOR_TARGET_BIND_INFO
+{
+ GR_DBG_VIEW_REF view;
+ GR_ENUM colorTargetState; // GR_IMAGE_STATE
+} GR_DBG_COLOR_TARGET_BIND_INFO;
+
+typedef struct _GR_DBG_DEPTH_STENCIL_BIND_INFO
+{
+ GR_DBG_VIEW_REF view;
+ GR_ENUM depthState; // GR_IMAGE_STATE
+ GR_ENUM stencilState; // GR_IMAGE_STATE
+} GR_DBG_DEPTH_STENCIL_BIND_INFO;
+
+typedef struct _GR_DBG_OP_DATA_CMD_BIND_TARGETS
+{
+ GR_UINT colorTargetCount;
+ GR_DBG_COLOR_TARGET_BIND_INFO colorTargetData[GR_MAX_COLOR_TARGETS];
+ GR_DBG_DEPTH_STENCIL_BIND_INFO depthTargetData;
+} GR_DBG_OP_DATA_CMD_BIND_TARGETS;
+
+typedef struct _GR_DBG_OP_DATA_CMD_PREPARE_MEMORY_REGIONS
+{
+ GR_UINT transitionCount;
+ GR_MEMORY_STATE_TRANSITION stateTransitions[1];
+ // (transitionCount-1) more GR_MEMORY_STATE_TRANSITION structures to follow...
+} GR_DBG_OP_DATA_CMD_PREPARE_MEMORY_REGIONS;
+
+typedef struct _GR_DBG_IMAGE_STATE_TRANSITION
+{
+ GR_DBG_OBJECT_REF image;
+ GR_ENUM oldState; // GR_IMAGE_STATE
+ GR_ENUM newState; // GR_IMAGE_STATE
+ GR_IMAGE_SUBRESOURCE_RANGE subresourceRange;
+} GR_DBG_IMAGE_STATE_TRANSITION;
+
+typedef struct _GR_DBG_OP_DATA_CMD_PREPARE_IMAGES
+{
+ GR_UINT transitionCount;
+ GR_DBG_IMAGE_STATE_TRANSITION stateTransitions[1];
+ // (transitionCount-1) more GR_DBG_IMAGE_STATE_TRANSITION_DATA structures to follow...
+} GR_DBG_OP_DATA_CMD_PREPARE_IMAGES;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DRAW
+{
+ GR_UINT firstVertex;
+ GR_UINT vertexCount;
+ GR_UINT firstInstance;
+ GR_UINT instanceCount;
+} GR_DBG_OP_DATA_CMD_DRAW;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DRAW_INDEXED
+{
+ GR_UINT firstIndex;
+ GR_UINT indexCount;
+ GR_INT vertexOffset;
+ GR_UINT firstInstance;
+ GR_UINT instanceCount;
+} GR_DBG_OP_DATA_CMD_DRAW_INDEXED;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DRAW_INDIRECT
+{
+ GR_GPU_MEMORY mem;
+ GR_GPU_SIZE offset;
+} GR_DBG_OP_DATA_CMD_DRAW_INDIRECT;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DRAW_INDEXED_INDIRECT
+{
+ GR_GPU_MEMORY mem;
+ GR_GPU_SIZE offset;
+} GR_DBG_OP_DATA_CMD_DRAW_INDEXED_INDIRECT;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DISPATCH
+{
+ GR_UINT x;
+ GR_UINT y;
+ GR_UINT z;
+} GR_DBG_OP_DATA_CMD_DISPATCH;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DISPATCH_INDIRECT
+{
+ GR_GPU_MEMORY mem;
+ GR_GPU_SIZE offset;
+} GR_DBG_OP_DATA_CMD_DISPATCH_INDIRECT;
+
+typedef struct _GR_DBG_OP_DATA_CMD_COPY_MEMORY
+{
+ GR_GPU_MEMORY srcMem;
+ GR_GPU_MEMORY destMem;
+ GR_UINT regionCount;
+ GR_MEMORY_COPY regions[1];
+ // (regionCount-1) more GR_MEMORY_COPY structures to follow...
+} GR_DBG_OP_DATA_CMD_COPY_MEMORY;
+
+typedef struct _GR_DBG_OP_DATA_CMD_COPY_IMAGE
+{
+ GR_DBG_OBJECT_REF srcImage;
+ GR_DBG_OBJECT_REF destImage;
+ GR_UINT regionCount;
+ GR_IMAGE_COPY regions[1];
+ // (regionCount-1) more GR_IMAGE_COPY structures to follow...
+} GR_DBG_OP_DATA_CMD_COPY_IMAGE;
+
+typedef struct _GR_DBG_OP_DATA_CMD_COPY_MEMORY_TO_IMAGE
+{
+ GR_GPU_MEMORY srcMem;
+ GR_DBG_OBJECT_REF destImage;
+ GR_UINT regionCount;
+ GR_MEMORY_IMAGE_COPY regions[1];
+ // (regionCount-1) more GR_MEMORY_IMAGE_COPY structures to follow...
+} GR_DBG_OP_DATA_CMD_COPY_MEMORY_TO_IMAGE;
+
+typedef struct _GR_DBG_OP_DATA_CMD_COPY_IMAGE_TO_MEMORY
+{
+ GR_DBG_OBJECT_REF srcImage;
+ GR_GPU_MEMORY destMem;
+ GR_UINT regionCount;
+ GR_MEMORY_IMAGE_COPY regions[1];
+ // (regionCount-1) more GR_MEMORY_IMAGE_COPY structures to follow...
+} GR_DBG_OP_DATA_CMD_COPY_IMAGE_TO_MEMORY;
+
+typedef struct _GR_DBG_OP_DATA_CMD_CLONE_IMAGE_DATA
+{
+ GR_DBG_OBJECT_REF srcImage;
+ GR_ENUM srcImageState; // GR_IMAGE_STATE
+ GR_DBG_OBJECT_REF destImage;
+ GR_ENUM destImageState; // GR_IMAGE_STATE
+} GR_DBG_OP_DATA_CMD_CLONE_IMAGE_DATA;
+
+typedef struct _GR_DBG_OP_DATA_CMD_UPDATE_MEMORY
+{
+ GR_GPU_MEMORY destMem;
+ GR_GPU_SIZE destOffset;
+ GR_GPU_SIZE dataSize;
+ GR_UINT32 data[1];
+ // (dataSize-4) more bytes of data to follow...
+} GR_DBG_OP_DATA_CMD_UPDATE_MEMORY;
+
+typedef struct _GR_DBG_OP_DATA_CMD_FILL_MEMORY
+{
+ GR_GPU_MEMORY destMem;
+ GR_GPU_SIZE destOffset;
+ GR_GPU_SIZE fillSize;
+ GR_UINT32 data;
+} GR_DBG_OP_DATA_CMD_FILL_MEMORY;
+
+typedef struct _GR_DBG_OP_DATA_CMD_CLEAR_COLOR_IMAGE
+{
+ GR_DBG_OBJECT_REF image;
+ GR_FLOAT color[4];
+ GR_UINT rangeCount;
+ GR_IMAGE_SUBRESOURCE_RANGE ranges[1];
+ // (rangeCount-1) more GR_IMAGE_SUBRESOURCE_RANGE structures to follow...
+} GR_DBG_OP_DATA_CMD_CLEAR_COLOR_IMAGE;
+
+typedef struct _GR_DBG_OP_DATA_CMD_CLEAR_COLOR_IMAGE_RAW
+{
+ GR_DBG_OBJECT_REF image;
+ GR_UINT32 color[4];
+ GR_UINT rangeCount;
+ GR_IMAGE_SUBRESOURCE_RANGE ranges[1];
+ // (rangeCount-1) more GR_IMAGE_SUBRESOURCE_RANGE structures to follow...
+} GR_DBG_OP_DATA_CMD_CLEAR_COLOR_IMAGE_RAW;
+
+typedef struct _GR_DBG_OP_DATA_CMD_CLEAR_DEPTH_STENCIL
+{
+ GR_DBG_OBJECT_REF image;
+ GR_FLOAT depth;
+ GR_UINT8 stencil;
+ GR_UINT rangeCount;
+ GR_IMAGE_SUBRESOURCE_RANGE ranges[1];
+ // (rangeCount-1) more GR_IMAGE_SUBRESOURCE_RANGE structures to follow...
+} GR_DBG_OP_DATA_CMD_CLEAR_DEPTH_STENCIL;
+
+typedef struct _GR_DBG_OP_DATA_CMD_RESOLVE_IMAGE
+{
+ GR_DBG_OBJECT_REF srcImage;
+ GR_DBG_OBJECT_REF destImage;
+ GR_UINT rectCount;
+ GR_IMAGE_RESOLVE rects[1];
+ // (rectCount-1) more GR_IMAGE_RESOLVE structures to follow...
+} GR_DBG_OP_DATA_CMD_RESOLVE_IMAGE;
+
+typedef struct _GR_DBG_OP_DATA_CMD_SET_EVENT
+{
+ GR_DBG_OBJECT_REF event;
+} GR_DBG_OP_DATA_CMD_SET_EVENT;
+
+typedef struct _GR_DBG_OP_DATA_CMD_RESET_EVENT
+{
+ GR_DBG_OBJECT_REF event;
+} GR_DBG_OP_DATA_CMD_RESET_EVENT;
+
+typedef struct _GR_DBG_OP_DATA_CMD_MEMORY_ATOMIC
+{
+ GR_GPU_MEMORY destMem;
+ GR_GPU_SIZE destOffset;
+ GR_UINT64 srcData;
+ GR_ENUM atomicOp; // GR_ATOMIC_OP
+} GR_DBG_OP_DATA_CMD_MEMORY_ATOMIC;
+
+typedef struct _GR_DBG_OP_DATA_CMD_BEGIN_QUERY
+{
+ GR_DBG_OBJECT_REF queryPool;
+ GR_UINT slot;
+ GR_FLAGS flags;
+} GR_DBG_OP_DATA_CMD_BEGIN_QUERY;
+
+typedef struct _GR_DBG_OP_DATA_CMD_END_QUERY
+{
+ GR_DBG_OBJECT_REF queryPool;
+ GR_UINT slot;
+} GR_DBG_OP_DATA_CMD_END_QUERY;
+
+typedef struct _GR_DBG_OP_DATA_CMD_RESET_QUERY_POOL
+{
+ GR_DBG_OBJECT_REF queryPool;
+ GR_UINT startQuery;
+ GR_UINT queryCount;
+} GR_DBG_OP_DATA_CMD_RESET_QUERY_POOL;
+
+typedef struct _GR_DBG_OP_DATA_CMD_WRITE_TIMESTAMP
+{
+ GR_ENUM timestampType; // GR_TIMESTAMP_TYPE
+ GR_GPU_MEMORY destMem;
+ GR_GPU_SIZE destOffset;
+} GR_DBG_OP_DATA_CMD_WRITE_TIMESTAMP;
+
+typedef struct _GR_DBG_OP_DATA_CMD_INIT_ATOMIC_COUNTERS
+{
+ GR_ENUM pipelineBindPoint; // GR_PIPELINE_BIND_POINT
+ GR_UINT startCounter;
+ GR_UINT counterCount;
+ GR_UINT32 data[1];
+ // (counterCount-1) more DWORDs of data to follow...
+} GR_DBG_OP_DATA_CMD_INIT_ATOMIC_COUNTERS;
+
+typedef struct _GR_DBG_OP_DATA_CMD_LOAD_ATOMIC_COUNTERS
+{
+ GR_ENUM pipelineBindPoint; // GR_PIPELINE_BIND_POINT
+ GR_UINT startCounter;
+ GR_UINT counterCount;
+ GR_GPU_MEMORY srcMem;
+ GR_GPU_SIZE srcOffset;
+} GR_DBG_OP_DATA_CMD_LOAD_ATOMIC_COUNTERS;
+
+typedef struct _GR_DBG_OP_DATA_CMD_SAVE_ATOMIC_COUNTERS
+{
+ GR_ENUM pipelineBindPoint; // GR_PIPELINE_BIND_POINT
+ GR_UINT startCounter;
+ GR_UINT counterCount;
+ GR_GPU_MEMORY destMem;
+ GR_GPU_SIZE destOffset;
+} GR_DBG_OP_DATA_CMD_SAVE_ATOMIC_COUNTERS;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DBG_MARKER_BEGIN
+{
+ GR_CHAR marker[1];
+ // The rest of null terminated string follows, up to the size of the packet...
+} GR_DBG_OP_DATA_CMD_DBG_MARKER_BEGIN;
+
+typedef struct _GR_DBG_OP_DATA_CMD_DBG_MARKER_END
+{
+ GR_UINT _reserved;
+} GR_DBG_OP_DATA_CMD_DBG_MARKER_END;
+
+// ------------------------------------------------------------------------------------------------
+// Debug message callback
+
+typedef GR_VOID (GR_STDCALL *GR_DBG_MSG_CALLBACK_FUNCTION)(
+ GR_ENUM msgType, // GR_DBG_MSG_TYPE
+ GR_ENUM validationLevel, // GR_VALIDATION_LEVEL
+ GR_BASE_OBJECT srcObject,
+ GR_SIZE location,
+ GR_ENUM msgCode, // GR_DBG_MSG_CODE
+ const GR_CHAR* pMsg,
+ GR_VOID* pUserData);
+
+// ------------------------------------------------------------------------------------------------
+// Debug functions
+
+GR_RESULT GR_STDCALL grDbgSetValidationLevel(
+ GR_DEVICE device,
+ GR_ENUM validationLevel); // GR_VALIDATION_LEVEL
+
+GR_RESULT GR_STDCALL grDbgRegisterMsgCallback(
+ GR_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback,
+ GR_VOID* pUserData);
+
+GR_RESULT GR_STDCALL grDbgUnregisterMsgCallback(
+ GR_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback);
+
+GR_RESULT GR_STDCALL grDbgSetMessageFilter(
+ GR_DEVICE device,
+ GR_ENUM msgCode, // GR_DBG_MSG_CODE
+ GR_ENUM filter); // GR_DBG_MSG_FILTER
+
+GR_RESULT GR_STDCALL grDbgSetObjectTag(
+ GR_BASE_OBJECT object,
+ GR_SIZE tagSize,
+ const GR_VOID* pTag);
+
+GR_RESULT GR_STDCALL grDbgSetGlobalOption(
+ GR_ENUM dbgOption, // GR_DBG_GLOBAL_OPTION
+ GR_SIZE dataSize,
+ const GR_VOID* pData);
+
+GR_RESULT GR_STDCALL grDbgSetDeviceOption(
+ GR_DEVICE device,
+ GR_ENUM dbgOption, // GR_DBG_DEVICE_OPTION
+ GR_SIZE dataSize,
+ const GR_VOID* pData);
+
+GR_VOID GR_STDCALL grCmdDbgMarkerBegin(
+ GR_CMD_BUFFER cmdBuffer,
+ const GR_CHAR* pMarker);
+
+GR_VOID GR_STDCALL grCmdDbgMarkerEnd(
+ GR_CMD_BUFFER cmdBuffer);
+
+#ifdef __cplusplus
+}; // extern "C"
+#endif // __cplusplus
+
+#endif // __MANTLEDBG_H__
diff --git a/include/mantlePlatform.h b/include/mantlePlatform.h
new file mode 100644
index 0000000..efcab0b
--- /dev/null
+++ b/include/mantlePlatform.h
@@ -0,0 +1,111 @@
+//
+// File: mantlePlatform.h
+//
+// Copyright 2014 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+//
+// AMD is granting you permission to use this software for reference
+// purposes only and not for use in any software product.
+//
+// You agree that you will not reverse engineer or decompile the Materials,
+// in whole or in part, except as allowed by applicable law.
+//
+// WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY,
+// INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE
+// WILL RUN UNINTERRUPTED OR ERROR-FREE OR WARRANTIES ARISING FROM CUSTOM OF
+// TRADE OR COURSE OF USAGE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF THE
+// SOFTWARE IS ASSUMED BY YOU.
+// Some jurisdictions do not allow the exclusion of implied warranties, so
+// the above exclusion may not apply to You.
+//
+// LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL
+// NOT, UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT,
+// INCIDENTAL, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF
+// THE SOFTWARE OR THIS AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+// In no event shall AMD's total liability to You for all damages, losses,
+// and causes of action (whether in contract, tort (including negligence) or
+// otherwise) exceed the amount of $100 USD. You agree to defend, indemnify
+// and hold harmless AMD and its licensors, and any of their directors,
+// officers, employees, affiliates or agents from and against any and all
+// loss, damage, liability and other expenses (including reasonable attorneys'
+// fees), resulting from Your use of the Software or violation of the terms and
+// conditions of this Agreement.
+//
+// U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
+// RIGHTS." Use, duplication, or disclosure by the Government is subject to the
+// restrictions as set forth in FAR 52.227-14 and DFAR252.227-7013, et seq., or
+// its successor. Use of the Materials by the Government constitutes
+// acknowledgement of AMD's proprietary rights in them.
+//
+// EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as
+// stated in the Software License Agreement.
+//
+
+#ifndef __MANTLEPLATFORM_H__
+#define __MANTLEPLATFORM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+* Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+#if defined(_WIN32)
+ // On Windows, GR_STDCALL should equate to the __stdcall convention
+ #define GR_STDCALL __stdcall
+#elif defined(__GNUC__)
+ // On GCC platforms, GR_STDCALL expands to nothing, so the default calling convention is used
+ #define GR_STDCALL
+#else
+ // Unsupported Platform!
+ #error "Unsupported OS Platform detected!"
+#endif
+
+#if defined(__GNUC__)
+ #include <stddef.h> // size_t, used by the GR_SIZE typedef below
+ #include <stdint.h> // uint8_t/int32_t/int64_t etc., required by the fixed-width GR_* typedefs below
+#endif
+
+#if defined(_WIN32)
+ // Windows platform
+ typedef unsigned __int8 GR_UINT8;
+ typedef signed __int32 GR_INT32;
+ typedef unsigned __int32 GR_UINT32;
+ typedef signed __int64 GR_INT64;
+ typedef unsigned __int64 GR_UINT64;
+#elif defined(__GNUC__)
+ // Other platforms
+ typedef uint8_t GR_UINT8;
+ typedef int32_t GR_INT32;
+ typedef uint32_t GR_UINT32;
+ typedef int64_t GR_INT64;
+ typedef uint64_t GR_UINT64;
+#endif
+
+typedef size_t GR_SIZE; // host (CPU) size in bytes
+typedef GR_UINT64 GR_GPU_SIZE; // GPU size/offset in bytes; 64-bit on every platform
+typedef GR_UINT8 GR_BYTE;
+typedef GR_INT32 GR_INT;
+typedef GR_UINT32 GR_UINT;
+typedef char GR_CHAR;
+typedef float GR_FLOAT;
+typedef double GR_DOUBLE;
+typedef GR_UINT32 GR_BOOL; // 32-bit boolean, deliberately not C/C++ bool
+typedef void GR_VOID;
+
+typedef GR_UINT32 GR_SAMPLE_MASK; // presumably one bit per MSAA sample — confirm against spec
+typedef GR_UINT32 GR_FLAGS; // bitwise OR of the GR_*_FLAGS values noted at each use site
+typedef GR_INT32 GR_ENUM; // carries values of the GR_* enum noted at each use site
+typedef GR_INT32 GR_RESULT; // status code returned by gr* entry points
+
+#ifdef __cplusplus
+}; // extern "C"
+#endif // __cplusplus
+
+#endif // __MANTLEPLATFORM_H__
diff --git a/include/mantleWsiWinExt.h b/include/mantleWsiWinExt.h
new file mode 100644
index 0000000..ee27b63
--- /dev/null
+++ b/include/mantleWsiWinExt.h
@@ -0,0 +1,243 @@
+//
+// File: mantleWsiWinExt.h
+//
+// Copyright 2014 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+//
+// AMD is granting you permission to use this software for reference
+// purposes only and not for use in any software product.
+//
+// You agree that you will not reverse engineer or decompile the Materials,
+// in whole or in part, except as allowed by applicable law.
+//
+// WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY,
+// INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE
+// WILL RUN UNINTERRUPTED OR ERROR-FREE OR WARRANTIES ARISING FROM CUSTOM OF
+// TRADE OR COURSE OF USAGE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF THE
+// SOFTWARE IS ASSUMED BY YOU.
+// Some jurisdictions do not allow the exclusion of implied warranties, so
+// the above exclusion may not apply to You.
+//
+// LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL
+// NOT, UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT,
+// INCIDENTAL, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF
+// THE SOFTWARE OR THIS AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+// In no event shall AMD's total liability to You for all damages, losses,
+// and causes of action (whether in contract, tort (including negligence) or
+// otherwise) exceed the amount of $100 USD. You agree to defend, indemnify
+// and hold harmless AMD and its licensors, and any of their directors,
+// officers, employees, affiliates or agents from and against any and all
+// loss, damage, liability and other expenses (including reasonable attorneys'
+// fees), resulting from Your use of the Software or violation of the terms and
+// conditions of this Agreement.
+//
+// U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
+// RIGHTS." Use, duplication, or disclosure by the Government is subject to the
+// restrictions as set forth in FAR 52.227-14 and DFAR252.227-7013, et seq., or
+// its successor. Use of the Materials by the Government constitutes
+// acknowledgement of AMD's proprietary rights in them.
+//
+// EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as
+// stated in the Software License Agreement.
+//
+
+#ifndef __MANTLEWSIWINEXT_H__
+#define __MANTLEWSIWINEXT_H__
+
+#include <windows.h>
+#include "mantle.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+GR_DEFINE_SUBCLASS_HANDLE(GR_WSI_WIN_DISPLAY, GR_OBJECT) // opaque display handle, subclass of GR_OBJECT
+
+#define GR_MAX_DEVICE_NAME_LEN 32 // size of GR_WSI_WIN_DISPLAY_PROPERTIES::displayName
+#define GR_MAX_GAMMA_RAMP_CONTROL_POINTS 1025 // array bound in GR_WSI_WIN_GAMMA_RAMP and its capabilities struct
+
+typedef enum _GR_WSI_WIN_RESULT_CODE
+{
+ GR_WSI_WIN_PRESENT_OCCLUDED = 0x00210000,
+ GR_WSI_WIN_ERROR_FULLSCREEN_UNAVAILABLE = 0x00210001,
+ GR_WSI_WIN_ERROR_DISPLAY_REMOVED = 0x00210002,
+ GR_WSI_WIN_ERROR_INCOMPATIBLE_DISPLAY_MODE = 0x00210003,
+ GR_WSI_WIN_ERROR_MULTI_DEVICE_PRESENT_FAILED = 0x00210004,
+ GR_WSI_WIN_ERROR_BLT_PRESENT_UNAVAILABLE = 0x00210005,
+ GR_WSI_WIN_ERROR_INVALID_RESOLUTION = 0x00210006,
+} GR_WSI_WIN_RESULT_CODE;
+
+typedef enum _GR_WSI_WIN_IMAGE_STATE
+{
+ GR_WSI_WIN_PRESENT_SOURCE_BLT = 0x00200000,
+ GR_WSI_WIN_PRESENT_SOURCE_FLIP = 0x00200001,
+
+ GR_WSI_WIN_IMAGE_STATE_BEGIN_RANGE = GR_WSI_WIN_PRESENT_SOURCE_BLT,
+ GR_WSI_WIN_IMAGE_STATE_END_RANGE = GR_WSI_WIN_PRESENT_SOURCE_FLIP,
+ GR_NUM_WSI_WIN_IMAGE_STATE = (GR_WSI_WIN_IMAGE_STATE_END_RANGE - GR_WSI_WIN_IMAGE_STATE_BEGIN_RANGE + 1),
+} GR_WSI_WIN_IMAGE_STATE;
+
+typedef enum _GR_WSI_WIN_ROTATION_ANGLE
+{
+ GR_WSI_WIN_ROTATION_ANGLE_0 = 0x00200100,
+ GR_WSI_WIN_ROTATION_ANGLE_90 = 0x00200101,
+ GR_WSI_WIN_ROTATION_ANGLE_180 = 0x00200102,
+ GR_WSI_WIN_ROTATION_ANGLE_270 = 0x00200103,
+
+ GR_WSI_WIN_ROTATION_ANGLE_BEGIN_RANGE = GR_WSI_WIN_ROTATION_ANGLE_0,
+ GR_WSI_WIN_ROTATION_ANGLE_END_RANGE = GR_WSI_WIN_ROTATION_ANGLE_270,
+ GR_NUM_WSI_WIN_ROTATION_ANGLE = (GR_WSI_WIN_ROTATION_ANGLE_END_RANGE - GR_WSI_WIN_ROTATION_ANGLE_BEGIN_RANGE + 1),
+} GR_WSI_WIN_ROTATION_ANGLE;
+
+typedef enum _GR_WSI_WIN_PRESENT_MODE
+{
+ GR_WSI_WIN_PRESENT_MODE_BLT = 0x00200200,
+ GR_WSI_WIN_PRESENT_MODE_FLIP = 0x00200201,
+
+ GR_WSI_WIN_PRESENT_MODE_BEGIN_RANGE = GR_WSI_WIN_PRESENT_MODE_BLT,
+ GR_WSI_WIN_PRESENT_MODE_END_RANGE = GR_WSI_WIN_PRESENT_MODE_FLIP,
+ GR_NUM_WSI_WIN_PRESENT_MODE = (GR_WSI_WIN_PRESENT_MODE_END_RANGE - GR_WSI_WIN_PRESENT_MODE_BEGIN_RANGE + 1),
+} GR_WSI_WIN_PRESENT_MODE;
+
+typedef enum _GR_WSI_WIN_INFO_TYPE
+{
+ GR_WSI_WIN_INFO_TYPE_QUEUE_PROPERTIES = 0x00206800,
+ GR_WSI_WIN_INFO_TYPE_DISPLAY_PROPERTIES = 0x00206801,
+ GR_WSI_WIN_INFO_TYPE_GAMMA_RAMP_CAPABILITIES = 0x00206802,
+
+ GR_WSI_WIN_INFO_TYPE_BEGIN_RANGE = GR_WSI_WIN_INFO_TYPE_QUEUE_PROPERTIES,
+ GR_WSI_WIN_INFO_TYPE_END_RANGE = GR_WSI_WIN_INFO_TYPE_GAMMA_RAMP_CAPABILITIES,
+ GR_NUM_WSI_WIN_INFO_TYPE = (GR_WSI_WIN_INFO_TYPE_END_RANGE - GR_WSI_WIN_INFO_TYPE_BEGIN_RANGE + 1),
+} GR_WSI_WIN_INFO_TYPE;
+
+typedef enum _GR_WSI_WIN_PRESENT_FLAGS
+{
+ GR_WSI_WIN_PRESENT_FLIP_DONOTWAIT = 0x00000001,
+ GR_WSI_WIN_PRESENT_FLIP_STEREO = 0x00000002,
+} GR_WSI_WIN_PRESENT_FLAGS;
+
+typedef enum _GR_WSI_WIN_IMAGE_CREATE_FLAGS
+{
+ GR_WSI_WIN_IMAGE_CREATE_FLIPPABLE = 0x00000001,
+ GR_WSI_WIN_IMAGE_CREATE_STEREO = 0x00000002,
+} GR_WSI_WIN_IMAGE_CREATE_FLAGS;
+
+typedef enum _GR_WSI_WIN_PRESENT_SUPPORT_FLAGS
+{
+ GR_WSI_WIN_FLIP_PRESENT_SUPPORTED = 0x00000001,
+ GR_WSI_WIN_BLT_PRESENT_SUPPORTED = 0x00000002,
+} GR_WSI_WIN_PRESENT_SUPPORT_FLAGS;
+
+typedef struct _GR_WSI_WIN_QUEUE_PROPERTIES
+{
+ GR_FLAGS presentSupport; // GR_WSI_WIN_PRESENT_SUPPORT_FLAGS
+} GR_WSI_WIN_QUEUE_PROPERTIES;
+
+typedef struct _GR_WSI_WIN_DISPLAY_PROPERTIES
+{
+ HMONITOR hMonitor;
+ GR_CHAR displayName[GR_MAX_DEVICE_NAME_LEN];
+ GR_RECT desktopCoordinates;
+ GR_ENUM rotation; // GR_WSI_WIN_ROTATION_ANGLE
+} GR_WSI_WIN_DISPLAY_PROPERTIES;
+
+typedef struct _GR_RGB_FLOAT
+{
+ GR_FLOAT red;
+ GR_FLOAT green;
+ GR_FLOAT blue;
+} GR_RGB_FLOAT;
+
+typedef struct _GR_WSI_WIN_GAMMA_RAMP_CAPABILITIES
+{
+ GR_BOOL supportsScaleAndOffset;
+ GR_FLOAT minConvertedValue;
+ GR_FLOAT maxConvertedValue;
+ GR_UINT controlPointCount;
+ GR_FLOAT controlPointPositions[GR_MAX_GAMMA_RAMP_CONTROL_POINTS];
+} GR_WSI_WIN_GAMMA_RAMP_CAPABILITIES;
+
+typedef struct _GR_WSI_WIN_GAMMA_RAMP
+{
+ GR_RGB_FLOAT scale;
+ GR_RGB_FLOAT offset;
+ GR_RGB_FLOAT gammaCurve[GR_MAX_GAMMA_RAMP_CONTROL_POINTS];
+} GR_WSI_WIN_GAMMA_RAMP;
+
+typedef struct _GR_WSI_WIN_PRESENTABLE_IMAGE_CREATE_INFO
+{
+ GR_FORMAT format;
+ GR_FLAGS usage; // GR_IMAGE_USAGE_FLAGS
+ GR_EXTENT2D extent;
+ GR_WSI_WIN_DISPLAY display;
+ GR_FLAGS flags; // GR_WSI_WIN_IMAGE_CREATE_FLAGS
+} GR_WSI_WIN_PRESENTABLE_IMAGE_CREATE_INFO;
+
+typedef struct _GR_WSI_WIN_PRESENT_INFO
+{
+ HWND hWndDest;
+ GR_IMAGE srcImage;
+ GR_ENUM presentMode; // GR_WSI_WIN_PRESENT_MODE
+ GR_UINT flipInterval;
+ GR_FLAGS flags; // GR_WSI_WIN_PRESENT_FLAGS
+} GR_WSI_WIN_PRESENT_INFO;
+
+typedef struct _GR_WSI_WIN_DISPLAY_MODE
+{
+ GR_EXTENT2D extent;
+ GR_FORMAT format;
+ GR_UINT refreshRate;
+ GR_BOOL stereo;
+ GR_BOOL crossDisplayPresent;
+} GR_WSI_WIN_DISPLAY_MODE;
+
+GR_RESULT GR_STDCALL grWsiWinGetDisplays(
+ GR_DEVICE device,
+ GR_UINT* pDisplayCount,
+ GR_WSI_WIN_DISPLAY* pDisplayList);
+
+GR_RESULT GR_STDCALL grWsiWinGetDisplayModeList(
+ GR_WSI_WIN_DISPLAY display,
+ GR_UINT* pDisplayModeCount,
+ GR_WSI_WIN_DISPLAY_MODE* pDisplayModeList);
+
+GR_RESULT GR_STDCALL grWsiWinTakeFullscreenOwnership(
+ GR_WSI_WIN_DISPLAY display,
+ GR_IMAGE image);
+
+GR_RESULT GR_STDCALL grWsiWinReleaseFullscreenOwnership(
+ GR_WSI_WIN_DISPLAY display);
+
+GR_RESULT GR_STDCALL grWsiWinSetGammaRamp(
+ GR_WSI_WIN_DISPLAY display,
+ const GR_WSI_WIN_GAMMA_RAMP* pGammaRamp);
+
+GR_RESULT GR_STDCALL grWsiWinWaitForVerticalBlank(
+ GR_WSI_WIN_DISPLAY display);
+
+GR_RESULT GR_STDCALL grWsiWinGetScanLine(
+ GR_WSI_WIN_DISPLAY display,
+ GR_INT* pScanLine);
+
+GR_RESULT GR_STDCALL grWsiWinCreatePresentableImage(
+ GR_DEVICE device,
+ const GR_WSI_WIN_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
+ GR_IMAGE* pImage,
+ GR_GPU_MEMORY* pMem);
+
+GR_RESULT GR_STDCALL grWsiWinQueuePresent(
+ GR_QUEUE queue,
+ const GR_WSI_WIN_PRESENT_INFO* pPresentInfo);
+
+GR_RESULT GR_STDCALL grWsiWinSetMaxQueuedFrames(
+ GR_DEVICE device,
+ GR_UINT maxFrames);
+
+#ifdef __cplusplus
+}; // extern "C"
+#endif // __cplusplus
+
+#endif // __MANTLEWSIWINEXT_H__
diff --git a/include/mantleWsiWinExtDbg.h b/include/mantleWsiWinExtDbg.h
new file mode 100644
index 0000000..8b0e5d3
--- /dev/null
+++ b/include/mantleWsiWinExtDbg.h
@@ -0,0 +1,70 @@
+//
+// File: mantleWsiWinExtDbg.h
+//
+// Copyright 2014 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+//
+// AMD is granting you permission to use this software for reference
+// purposes only and not for use in any software product.
+//
+// You agree that you will not reverse engineer or decompile the Materials,
+// in whole or in part, except as allowed by applicable law.
+//
+// WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY,
+// INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE
+// WILL RUN UNINTERRUPTED OR ERROR-FREE OR WARRANTIES ARISING FROM CUSTOM OF
+// TRADE OR COURSE OF USAGE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF THE
+// SOFTWARE IS ASSUMED BY YOU.
+// Some jurisdictions do not allow the exclusion of implied warranties, so
+// the above exclusion may not apply to You.
+//
+// LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL
+// NOT, UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT,
+// INCIDENTAL, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF
+// THE SOFTWARE OR THIS AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+// In no event shall AMD's total liability to You for all damages, losses,
+// and causes of action (whether in contract, tort (including negligence) or
+// otherwise) exceed the amount of $100 USD. You agree to defend, indemnify
+// and hold harmless AMD and its licensors, and any of their directors,
+// officers, employees, affiliates or agents from and against any and all
+// loss, damage, liability and other expenses (including reasonable attorneys'
+// fees), resulting from Your use of the Software or violation of the terms and
+// conditions of this Agreement.
+//
+// U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
+// RIGHTS." Use, duplication, or disclosure by the Government is subject to the
+// restrictions as set forth in FAR 52.227-14 and DFAR252.227-7013, et seq., or
+// its successor. Use of the Materials by the Government constitutes
+// acknowledgement of AMD's proprietary rights in them.
+//
+// EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as
+// stated in the Software License Agreement.
+//
+
+#ifndef __MANTLEWSIWINEXTDBG_H__
+#define __MANTLEWSIWINEXTDBG_H__
+
+#include "mantle.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+// Debug object-type identifiers for the Windows WSI extension objects.
+// Values start at 0x00210900 to stay out of the core debug-type range.
+typedef enum _GR_WSI_WIN_DBG_OBJECT_TYPE
+{
+ GR_WSI_WIN_DBG_OBJECT_DISPLAY = 0x00210900,
+ GR_WSI_WIN_DBG_OBJECT_PRESENTABLE_IMAGE = 0x00210901,
+
+ // Range sentinels; GR_NUM_... is the count of real values above.
+ GR_WSI_WIN_DBG_OBJECT_TYPE_BEGIN_RANGE = GR_WSI_WIN_DBG_OBJECT_DISPLAY,
+ GR_WSI_WIN_DBG_OBJECT_TYPE_END_RANGE = GR_WSI_WIN_DBG_OBJECT_PRESENTABLE_IMAGE,
+ GR_NUM_WSI_WIN_DBG_OBJECT_TYPE = (GR_WSI_WIN_DBG_OBJECT_TYPE_END_RANGE - GR_WSI_WIN_DBG_OBJECT_TYPE_BEGIN_RANGE + 1),
+} GR_WSI_WIN_DBG_OBJECT_TYPE;
+
+#ifdef __cplusplus
+}; // extern "C"
+#endif // __cplusplus
+
+#endif // __MANTLEWSIWINEXTDBG_H__
diff --git a/include/xgl.h b/include/xgl.h
new file mode 100644
index 0000000..5642660
--- /dev/null
+++ b/include/xgl.h
@@ -0,0 +1,2394 @@
+//
+// File: xgl.h
+//
+// Copyright 2014 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
+//
+// AMD is granting you permission to use this software and documentation (if
+// any) (collectively, the "Materials") pursuant to the terms and conditions
+// of the Software License Agreement included with the Materials. If you do
+// not have a copy of the Software License Agreement, contact your AMD
+// representative for a copy.
+// You agree that you will not reverse engineer or decompile the Materials,
+// in whole or in part, except as allowed by applicable law.
+//
+// WARRANTY DISCLAIMER: THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND. AMD DISCLAIMS ALL WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY,
+// INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, THAT THE SOFTWARE
+// WILL RUN UNINTERRUPTED OR ERROR-FREE OR WARRANTIES ARISING FROM CUSTOM OF
+// TRADE OR COURSE OF USAGE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF THE
+// SOFTWARE IS ASSUMED BY YOU.
+// Some jurisdictions do not allow the exclusion of implied warranties, so
+// the above exclusion may not apply to You.
+//
+// LIMITATION OF LIABILITY AND INDEMNIFICATION: AMD AND ITS LICENSORS WILL
+// NOT, UNDER ANY CIRCUMSTANCES BE LIABLE TO YOU FOR ANY PUNITIVE, DIRECT,
+// INCIDENTAL, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM USE OF
+// THE SOFTWARE OR THIS AGREEMENT EVEN IF AMD AND ITS LICENSORS HAVE BEEN
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+// In no event shall AMD's total liability to You for all damages, losses,
+// and causes of action (whether in contract, tort (including negligence) or
+// otherwise) exceed the amount of $100 USD. You agree to defend, indemnify
+// and hold harmless AMD and its licensors, and any of their directors,
+// officers, employees, affiliates or agents from and against any and all
+// loss, damage, liability and other expenses (including reasonable attorneys'
+// fees), resulting from Your use of the Software or violation of the terms and
+// conditions of this Agreement.
+//
+// U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
+// RIGHTS." Use, duplication, or disclosure by the Government is subject to the
+// restrictions as set forth in FAR 52.227-14 and DFAR252.227-7013, et seq., or
+// its successor. Use of the Materials by the Government constitutes
+// acknowledgement of AMD's proprietary rights in them.
+//
+// EXPORT RESTRICTIONS: The Materials may be subject to export restrictions as
+// stated in the Software License Agreement.
+//
+
+#ifndef __XGL_H__
+#define __XGL_H__
+
+#include <float.h>
+#include <stdint.h>
+
+// Packs a version triple into one 32-bit value (10-bit major, 10-bit minor,
+// 12-bit patch). Arguments are parenthesized so that expression arguments,
+// e.g. XGL_MAKE_VERSION(v + 1, 0, 0), expand with the intended precedence.
+#define XGL_MAKE_VERSION(major, minor, patch) \
+    (((major) << 22) | ((minor) << 12) | (patch))
+
+// XGL API version supported by this file
+#define XGL_API_VERSION XGL_MAKE_VERSION(0, 22, 0)
+
+// Calling-convention / export decoration for XGL entry points.
+// Empty by default; the includer may predefine it before including this file.
+#ifndef XGLAPI
+ #define XGLAPI
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+ * Datatypes
+ */
+// Fixed-width scalar aliases used throughout the API.
+// NOTE(review): XGL_SIZE and XGL_GPU_SIZE are 32-bit here, which limits
+// sizes/offsets to 4 GB -- confirm this is intentional for this API revision.
+typedef unsigned char XGL_BOOL;
+typedef void XGL_VOID;
+typedef signed char XGL_CHAR; /* 1-byte signed */
+typedef int32_t XGL_INT; /* 4-byte signed */
+typedef int32_t XGL_INT32; /* 4-byte signed */
+typedef uint32_t XGL_UINT; /* 4-byte unsigned */
+typedef uint32_t XGL_UINT32; /* 4-byte unsigned */
+typedef uint64_t XGL_UINT64; /* 8-byte unsigned */
+typedef uint32_t XGL_SIZE; /* 4-byte unsigned */
+typedef uint32_t XGL_GPU_SIZE; /* 4-byte unsigned */
+typedef uint32_t XGL_FLAGS; /* 4-byte unsigned */
+typedef uint32_t XGL_SAMPLE_MASK; /* 4-byte unsigned */
+typedef uint8_t XGL_UINT8; /* 1-byte unsigned */
+typedef float XGL_FLOAT; /* single precision float */
+typedef double XGL_DOUBLE; /* double precision float in [0,1] */
+
+/*
+***************************************************************************************************
+* Core XGL API
+***************************************************************************************************
+*/
+
+// Handle-definition helpers. In C++ builds each handle is a pointer to a
+// distinct empty struct (so mixing handle types is a compile error), and
+// subclass handles inherit so they convert implicitly to their base handle.
+// In C builds every handle degrades to an untyped void* (no type safety).
+#ifdef __cplusplus
+ #define XGL_DEFINE_HANDLE(_obj) struct _obj##_T {}; typedef _obj##_T* _obj;
+ #define XGL_DEFINE_SUBCLASS_HANDLE(_obj, _base) struct _obj##_T : public _base##_T {}; typedef _obj##_T* _obj;
+#else // __cplusplus
+ #define XGL_DEFINE_HANDLE(_obj) typedef void* _obj;
+ #define XGL_DEFINE_SUBCLASS_HANDLE(_obj, _base) typedef void* _obj;
+#endif // __cplusplus
+
+// API object handle hierarchy:
+//   XGL_PHYSICAL_GPU is a root handle; XGL_BASE_OBJECT is the root of all
+//   created objects; XGL_OBJECT derives from it; state objects derive from
+//   XGL_STATE_OBJECT (itself an XGL_OBJECT).
+XGL_DEFINE_HANDLE(XGL_PHYSICAL_GPU)
+XGL_DEFINE_HANDLE(XGL_BASE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_DEVICE, XGL_BASE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_QUEUE, XGL_BASE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_GPU_MEMORY, XGL_BASE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_OBJECT, XGL_BASE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_IMAGE, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_IMAGE_VIEW, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_COLOR_ATTACHMENT_VIEW, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_DEPTH_STENCIL_VIEW, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_SHADER, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_PIPELINE, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_PIPELINE_DELTA, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_SAMPLER, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_DESCRIPTOR_SET, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_STATE_OBJECT, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_VIEWPORT_STATE_OBJECT, XGL_STATE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_RASTER_STATE_OBJECT, XGL_STATE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_MSAA_STATE_OBJECT, XGL_STATE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_COLOR_BLEND_STATE_OBJECT, XGL_STATE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_DEPTH_STENCIL_STATE_OBJECT, XGL_STATE_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_CMD_BUFFER, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_FENCE, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_QUEUE_SEMAPHORE, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_EVENT, XGL_OBJECT)
+XGL_DEFINE_SUBCLASS_HANDLE(XGL_QUERY_POOL, XGL_OBJECT)
+
+// API-wide limits and sentinel values.
+#define XGL_MAX_PHYSICAL_GPUS 16
+#define XGL_MAX_PHYSICAL_GPU_NAME 256
+#define XGL_MAX_MEMORY_HEAPS 8
+#define XGL_MAX_DESCRIPTOR_SETS 2
+#define XGL_MAX_VIEWPORTS 16
+#define XGL_MAX_COLOR_ATTACHMENTS 8
+
+// Fixed: was MAX_FLOAT, which is not a standard C macro and would fail to
+// compile wherever used; FLT_MAX (<float.h>) is the standard spelling.
+#define XGL_LOD_CLAMP_NONE FLT_MAX
+#define XGL_LAST_MIP_OR_SLICE 0xffffffff
+
+#define XGL_TRUE 1
+#define XGL_FALSE 0
+
+#define XGL_NULL_HANDLE 0
+
+// This macro defines MAX_UINT in enumerations to force compilers to use 32 bits
+// to represent them. This may or may not be necessary on some compilers. The
+// option to compile it out may allow compilers that warn about missing enumerants
+// in switch statements to be silenced.
+#define XGL_MAX_ENUM(T) T##_MAX_ENUM = 0xFFFFFFFF
+
+// ------------------------------------------------------------------------------------------------
+// Enumerations
+
+
+// Queue categories. NOTE(review): values begin at 0x1 (not 0x0) and, unlike
+// the sibling enums below, no BEGIN/END_RANGE sentinels are defined.
+typedef enum _XGL_QUEUE_TYPE
+{
+ XGL_QUEUE_TYPE_GRAPHICS = 0x1,
+ XGL_QUEUE_TYPE_COMPUTE = 0x2,
+ XGL_QUEUE_TYPE_DMA = 0x3,
+ XGL_MAX_ENUM(_XGL_QUEUE_TYPE)
+} XGL_QUEUE_TYPE;
+
+// GPU memory residency priority, from UNUSED (lowest) to VERY_HIGH.
+typedef enum _XGL_MEMORY_PRIORITY
+{
+ XGL_MEMORY_PRIORITY_UNUSED = 0x0,
+ XGL_MEMORY_PRIORITY_VERY_LOW = 0x1,
+ XGL_MEMORY_PRIORITY_LOW = 0x2,
+ XGL_MEMORY_PRIORITY_NORMAL = 0x3,
+ XGL_MEMORY_PRIORITY_HIGH = 0x4,
+ XGL_MEMORY_PRIORITY_VERY_HIGH = 0x5,
+
+ XGL_MEMORY_PRIORITY_BEGIN_RANGE = XGL_MEMORY_PRIORITY_UNUSED,
+ XGL_MEMORY_PRIORITY_END_RANGE = XGL_MEMORY_PRIORITY_VERY_HIGH,
+ XGL_NUM_MEMORY_PRIORITY = (XGL_MEMORY_PRIORITY_END_RANGE - XGL_MEMORY_PRIORITY_BEGIN_RANGE + 1),
+ // Added for consistency: every other enum in this header carries the
+ // XGL_MAX_ENUM sentinel to force a 32-bit underlying type; this one was
+ // the only omission. Purely additive, so existing callers are unaffected.
+ XGL_MAX_ENUM(_XGL_MEMORY_PRIORITY)
+} XGL_MEMORY_PRIORITY;
+
+// Usage states for GPU memory ranges (used in state-transition commands).
+typedef enum _XGL_MEMORY_STATE
+{
+ XGL_MEMORY_STATE_DATA_TRANSFER = 0x00000000,
+ XGL_MEMORY_STATE_GRAPHICS_SHADER_READ_ONLY = 0x00000001,
+ XGL_MEMORY_STATE_GRAPHICS_SHADER_WRITE_ONLY = 0x00000002,
+ XGL_MEMORY_STATE_GRAPHICS_SHADER_READ_WRITE = 0x00000003,
+ XGL_MEMORY_STATE_COMPUTE_SHADER_READ_ONLY = 0x00000004,
+ XGL_MEMORY_STATE_COMPUTE_SHADER_WRITE_ONLY = 0x00000005,
+ XGL_MEMORY_STATE_COMPUTE_SHADER_READ_WRITE = 0x00000006,
+ XGL_MEMORY_STATE_MULTI_SHADER_READ_ONLY = 0x00000007,
+ XGL_MEMORY_STATE_INDEX_DATA = 0x00000008,
+ XGL_MEMORY_STATE_INDIRECT_ARG = 0x00000009,
+ XGL_MEMORY_STATE_WRITE_TIMESTAMP = 0x0000000A,
+ XGL_MEMORY_STATE_QUEUE_ATOMIC = 0x0000000B,
+
+ XGL_MEMORY_STATE_BEGIN_RANGE = XGL_MEMORY_STATE_DATA_TRANSFER,
+ XGL_MEMORY_STATE_END_RANGE = XGL_MEMORY_STATE_QUEUE_ATOMIC,
+ XGL_NUM_MEMORY_STATE = (XGL_MEMORY_STATE_END_RANGE - XGL_MEMORY_STATE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_MEMORY_STATE)
+} XGL_MEMORY_STATE;
+
+// Usage states for images; parallels XGL_MEMORY_STATE and adds
+// target/clear/resolve states that only make sense for images.
+typedef enum _XGL_IMAGE_STATE
+{
+ XGL_IMAGE_STATE_DATA_TRANSFER = 0x00000000,
+ XGL_IMAGE_STATE_GRAPHICS_SHADER_READ_ONLY = 0x00000001,
+ XGL_IMAGE_STATE_GRAPHICS_SHADER_WRITE_ONLY = 0x00000002,
+ XGL_IMAGE_STATE_GRAPHICS_SHADER_READ_WRITE = 0x00000003,
+ XGL_IMAGE_STATE_COMPUTE_SHADER_READ_ONLY = 0x00000004,
+ XGL_IMAGE_STATE_COMPUTE_SHADER_WRITE_ONLY = 0x00000005,
+ XGL_IMAGE_STATE_COMPUTE_SHADER_READ_WRITE = 0x00000006,
+ XGL_IMAGE_STATE_MULTI_SHADER_READ_ONLY = 0x00000007,
+ XGL_IMAGE_STATE_TARGET_AND_SHADER_READ_ONLY = 0x00000008,
+ XGL_IMAGE_STATE_UNINITIALIZED_TARGET = 0x00000009,
+ XGL_IMAGE_STATE_TARGET_RENDER_ACCESS_OPTIMAL = 0x0000000A,
+ XGL_IMAGE_STATE_TARGET_SHADER_ACCESS_OPTIMAL = 0x0000000B,
+ XGL_IMAGE_STATE_CLEAR = 0x0000000C,
+ XGL_IMAGE_STATE_RESOLVE_SOURCE = 0x0000000D,
+ XGL_IMAGE_STATE_RESOLVE_DESTINATION = 0x0000000E,
+
+ XGL_IMAGE_STATE_BEGIN_RANGE = XGL_IMAGE_STATE_DATA_TRANSFER,
+ XGL_IMAGE_STATE_END_RANGE = XGL_IMAGE_STATE_RESOLVE_DESTINATION,
+ XGL_NUM_IMAGE_STATE = (XGL_IMAGE_STATE_END_RANGE - XGL_IMAGE_STATE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_IMAGE_STATE)
+} XGL_IMAGE_STATE;
+
+// Image dimensionality.
+typedef enum _XGL_IMAGE_TYPE
+{
+ XGL_IMAGE_1D = 0x00000000,
+ XGL_IMAGE_2D = 0x00000001,
+ XGL_IMAGE_3D = 0x00000002,
+
+ XGL_IMAGE_TYPE_BEGIN_RANGE = XGL_IMAGE_1D,
+ XGL_IMAGE_TYPE_END_RANGE = XGL_IMAGE_3D,
+ XGL_NUM_IMAGE_TYPE = (XGL_IMAGE_TYPE_END_RANGE - XGL_IMAGE_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_IMAGE_TYPE)
+} XGL_IMAGE_TYPE;
+
+// Memory layout of image texels: row-major (linear) vs. implementation-optimal.
+typedef enum _XGL_IMAGE_TILING
+{
+ XGL_LINEAR_TILING = 0x00000000,
+ XGL_OPTIMAL_TILING = 0x00000001,
+
+ XGL_IMAGE_TILING_BEGIN_RANGE = XGL_LINEAR_TILING,
+ XGL_IMAGE_TILING_END_RANGE = XGL_OPTIMAL_TILING,
+ XGL_NUM_IMAGE_TILING = (XGL_IMAGE_TILING_END_RANGE - XGL_IMAGE_TILING_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_IMAGE_TILING)
+} XGL_IMAGE_TILING;
+
+// Dimensionality of an image view (adds CUBE over XGL_IMAGE_TYPE).
+typedef enum _XGL_IMAGE_VIEW_TYPE
+{
+ XGL_IMAGE_VIEW_1D = 0x00000000,
+ XGL_IMAGE_VIEW_2D = 0x00000001,
+ XGL_IMAGE_VIEW_3D = 0x00000002,
+ XGL_IMAGE_VIEW_CUBE = 0x00000003,
+
+ XGL_IMAGE_VIEW_TYPE_BEGIN_RANGE = XGL_IMAGE_VIEW_1D,
+ XGL_IMAGE_VIEW_TYPE_END_RANGE = XGL_IMAGE_VIEW_CUBE,
+ XGL_NUM_IMAGE_VIEW_TYPE = (XGL_IMAGE_VIEW_TYPE_END_RANGE - XGL_IMAGE_VIEW_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_IMAGE_VIEW_TYPE)
+} XGL_IMAGE_VIEW_TYPE;
+
+// Which aspect of an image a view/subresource refers to.
+typedef enum _XGL_IMAGE_ASPECT
+{
+ XGL_IMAGE_ASPECT_COLOR = 0x00000000,
+ XGL_IMAGE_ASPECT_DEPTH = 0x00000001,
+ XGL_IMAGE_ASPECT_STENCIL = 0x00000002,
+
+ XGL_IMAGE_ASPECT_BEGIN_RANGE = XGL_IMAGE_ASPECT_COLOR,
+ XGL_IMAGE_ASPECT_END_RANGE = XGL_IMAGE_ASPECT_STENCIL,
+ XGL_NUM_IMAGE_ASPECT = (XGL_IMAGE_ASPECT_END_RANGE - XGL_IMAGE_ASPECT_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_IMAGE_ASPECT)
+} XGL_IMAGE_ASPECT;
+
+// Per-channel source selection for image views (constant 0/1 or R/G/B/A).
+typedef enum _XGL_CHANNEL_SWIZZLE
+{
+ XGL_CHANNEL_SWIZZLE_ZERO = 0x00000000,
+ XGL_CHANNEL_SWIZZLE_ONE = 0x00000001,
+ XGL_CHANNEL_SWIZZLE_R = 0x00000002,
+ XGL_CHANNEL_SWIZZLE_G = 0x00000003,
+ XGL_CHANNEL_SWIZZLE_B = 0x00000004,
+ XGL_CHANNEL_SWIZZLE_A = 0x00000005,
+
+ XGL_CHANNEL_SWIZZLE_BEGIN_RANGE = XGL_CHANNEL_SWIZZLE_ZERO,
+ XGL_CHANNEL_SWIZZLE_END_RANGE = XGL_CHANNEL_SWIZZLE_A,
+ XGL_NUM_CHANNEL_SWIZZLE = (XGL_CHANNEL_SWIZZLE_END_RANGE - XGL_CHANNEL_SWIZZLE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_CHANNEL_SWIZZLE)
+} XGL_CHANNEL_SWIZZLE;
+
+// What a descriptor-set slot holds; NEXT_DESCRIPTOR_SET chains to another set.
+typedef enum _XGL_DESCRIPTOR_SET_SLOT_TYPE
+{
+ XGL_SLOT_UNUSED = 0x00000000,
+ XGL_SLOT_SHADER_RESOURCE = 0x00000001,
+ XGL_SLOT_SHADER_UAV = 0x00000002,
+ XGL_SLOT_SHADER_SAMPLER = 0x00000003,
+ XGL_SLOT_NEXT_DESCRIPTOR_SET = 0x00000004,
+
+ XGL_DESCRIPTOR_SET_SLOT_TYPE_BEGIN_RANGE = XGL_SLOT_UNUSED,
+ XGL_DESCRIPTOR_SET_SLOT_TYPE_END_RANGE = XGL_SLOT_NEXT_DESCRIPTOR_SET,
+ XGL_NUM_DESCRIPTOR_SET_SLOT_TYPE = (XGL_DESCRIPTOR_SET_SLOT_TYPE_END_RANGE - XGL_DESCRIPTOR_SET_SLOT_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_DESCRIPTOR_SET_SLOT_TYPE)
+} XGL_DESCRIPTOR_SET_SLOT_TYPE;
+
+// Query-pool query kinds.
+typedef enum _XGL_QUERY_TYPE
+{
+ XGL_QUERY_OCCLUSION = 0x00000000,
+ XGL_QUERY_PIPELINE_STATISTICS = 0x00000001,
+
+ XGL_QUERY_TYPE_BEGIN_RANGE = XGL_QUERY_OCCLUSION,
+ XGL_QUERY_TYPE_END_RANGE = XGL_QUERY_PIPELINE_STATISTICS,
+ XGL_NUM_QUERY_TYPE = (XGL_QUERY_TYPE_END_RANGE - XGL_QUERY_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_QUERY_TYPE)
+} XGL_QUERY_TYPE;
+
+// Pipeline position at which a timestamp is written.
+typedef enum _XGL_TIMESTAMP_TYPE
+{
+ XGL_TIMESTAMP_TOP = 0x00000000,
+ XGL_TIMESTAMP_BOTTOM = 0x00000001,
+
+ XGL_TIMESTAMP_TYPE_BEGIN_RANGE = XGL_TIMESTAMP_TOP,
+ XGL_TIMESTAMP_TYPE_END_RANGE = XGL_TIMESTAMP_BOTTOM,
+ XGL_NUM_TIMESTAMP_TYPE = (XGL_TIMESTAMP_TYPE_END_RANGE - XGL_TIMESTAMP_TYPE_BEGIN_RANGE + 1),
+ // Fixed: was XGL_MAX_ENUM(_XGL_TIMESTEAMP_TYPE) ("TIMESTEAMP"), which
+ // pasted a misspelled sentinel name inconsistent with the enum tag.
+ XGL_MAX_ENUM(_XGL_TIMESTAMP_TYPE)
+} XGL_TIMESTAMP_TYPE;
+
+// Predefined sampler border colors.
+typedef enum _XGL_BORDER_COLOR_TYPE
+{
+ XGL_BORDER_COLOR_OPAQUE_WHITE = 0x00000000,
+ XGL_BORDER_COLOR_TRANSPARENT_BLACK = 0x00000001,
+ XGL_BORDER_COLOR_OPAQUE_BLACK = 0x00000002,
+
+ XGL_BORDER_COLOR_TYPE_BEGIN_RANGE = XGL_BORDER_COLOR_OPAQUE_WHITE,
+ XGL_BORDER_COLOR_TYPE_END_RANGE = XGL_BORDER_COLOR_OPAQUE_BLACK,
+ XGL_NUM_BORDER_COLOR_TYPE = (XGL_BORDER_COLOR_TYPE_END_RANGE - XGL_BORDER_COLOR_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_BORDER_COLOR_TYPE)
+} XGL_BORDER_COLOR_TYPE;
+
+// Which pipeline (compute or graphics) a bind operation targets.
+typedef enum _XGL_PIPELINE_BIND_POINT
+{
+ XGL_PIPELINE_BIND_POINT_COMPUTE = 0x00000000,
+ XGL_PIPELINE_BIND_POINT_GRAPHICS = 0x00000001,
+
+ XGL_PIPELINE_BIND_POINT_BEGIN_RANGE = XGL_PIPELINE_BIND_POINT_COMPUTE,
+ XGL_PIPELINE_BIND_POINT_END_RANGE = XGL_PIPELINE_BIND_POINT_GRAPHICS,
+ XGL_NUM_PIPELINE_BIND_POINT = (XGL_PIPELINE_BIND_POINT_END_RANGE - XGL_PIPELINE_BIND_POINT_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_PIPELINE_BIND_POINT)
+} XGL_PIPELINE_BIND_POINT;
+
+// Which state-object slot a bind operation targets.
+typedef enum _XGL_STATE_BIND_POINT
+{
+ XGL_STATE_BIND_VIEWPORT = 0x00000000,
+ XGL_STATE_BIND_RASTER = 0x00000001,
+ XGL_STATE_BIND_DEPTH_STENCIL = 0x00000002,
+ XGL_STATE_BIND_COLOR_BLEND = 0x00000003,
+ XGL_STATE_BIND_MSAA = 0x00000004,
+
+ XGL_STATE_BIND_POINT_BEGIN_RANGE = XGL_STATE_BIND_VIEWPORT,
+ XGL_STATE_BIND_POINT_END_RANGE = XGL_STATE_BIND_MSAA,
+ XGL_NUM_STATE_BIND_POINT = (XGL_STATE_BIND_POINT_END_RANGE - XGL_STATE_BIND_POINT_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_STATE_BIND_POINT)
+} XGL_STATE_BIND_POINT;
+
+// Input-assembly primitive topologies, including adjacency and patch variants.
+typedef enum _XGL_PRIMITIVE_TOPOLOGY
+{
+ XGL_TOPOLOGY_POINT_LIST = 0x00000000,
+ XGL_TOPOLOGY_LINE_LIST = 0x00000001,
+ XGL_TOPOLOGY_LINE_STRIP = 0x00000002,
+ XGL_TOPOLOGY_TRIANGLE_LIST = 0x00000003,
+ XGL_TOPOLOGY_TRIANGLE_STRIP = 0x00000004,
+ XGL_TOPOLOGY_RECT_LIST = 0x00000005,
+ XGL_TOPOLOGY_QUAD_LIST = 0x00000006,
+ XGL_TOPOLOGY_QUAD_STRIP = 0x00000007,
+ XGL_TOPOLOGY_LINE_LIST_ADJ = 0x00000008,
+ XGL_TOPOLOGY_LINE_STRIP_ADJ = 0x00000009,
+ XGL_TOPOLOGY_TRIANGLE_LIST_ADJ = 0x0000000a,
+ XGL_TOPOLOGY_TRIANGLE_STRIP_ADJ = 0x0000000b,
+ XGL_TOPOLOGY_PATCH = 0x0000000c,
+
+ XGL_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = XGL_TOPOLOGY_POINT_LIST,
+ XGL_PRIMITIVE_TOPOLOGY_END_RANGE = XGL_TOPOLOGY_PATCH,
+ XGL_NUM_PRIMITIVE_TOPOLOGY = (XGL_PRIMITIVE_TOPOLOGY_END_RANGE - XGL_PRIMITIVE_TOPOLOGY_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_PRIMITIVE_TOPOLOGY)
+} XGL_PRIMITIVE_TOPOLOGY;
+
+// Index-buffer element width (8/16/32-bit).
+typedef enum _XGL_INDEX_TYPE
+{
+ XGL_INDEX_8 = 0x00000000,
+ XGL_INDEX_16 = 0x00000001,
+ XGL_INDEX_32 = 0x00000002,
+
+ XGL_INDEX_TYPE_BEGIN_RANGE = XGL_INDEX_8,
+ XGL_INDEX_TYPE_END_RANGE = XGL_INDEX_32,
+ XGL_NUM_INDEX_TYPE = (XGL_INDEX_TYPE_END_RANGE - XGL_INDEX_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_INDEX_TYPE)
+} XGL_INDEX_TYPE;
+
+// Sampler magnification/minification filter. NOTE(review): this enum and
+// XGL_TEX_MIPMAP_MODE use decimal values and omit the range sentinels the
+// other enums carry -- stylistic inconsistency only.
+typedef enum _XGL_TEX_FILTER
+{
+ XGL_TEX_FILTER_NEAREST = 0,
+ XGL_TEX_FILTER_LINEAR = 1,
+ XGL_MAX_ENUM(_XGL_TEX_FILTER)
+} XGL_TEX_FILTER;
+
+// Sampler mip-level selection behavior.
+typedef enum _XGL_TEX_MIPMAP_MODE
+{
+ XGL_TEX_MIPMAP_BASE = 0, // Always choose base level
+ XGL_TEX_MIPMAP_NEAREST = 1, // Choose nearest mip level
+ XGL_TEX_MIPMAP_LINEAR = 2, // Linear filter between mip levels
+ XGL_MAX_ENUM(_XGL_TEX_MIPMAP_MODE)
+} XGL_TEX_MIPMAP_MODE;
+
+// Sampler addressing mode for out-of-range texture coordinates.
+typedef enum _XGL_TEX_ADDRESS
+{
+ XGL_TEX_ADDRESS_WRAP = 0x00000000,
+ XGL_TEX_ADDRESS_MIRROR = 0x00000001,
+ XGL_TEX_ADDRESS_CLAMP = 0x00000002,
+ XGL_TEX_ADDRESS_MIRROR_ONCE = 0x00000003,
+ XGL_TEX_ADDRESS_CLAMP_BORDER = 0x00000004,
+
+ XGL_TEX_ADDRESS_BEGIN_RANGE = XGL_TEX_ADDRESS_WRAP,
+ XGL_TEX_ADDRESS_END_RANGE = XGL_TEX_ADDRESS_CLAMP_BORDER,
+ XGL_NUM_TEX_ADDRESS = (XGL_TEX_ADDRESS_END_RANGE - XGL_TEX_ADDRESS_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_TEX_ADDRESS)
+} XGL_TEX_ADDRESS;
+
+// Comparison function for depth/stencil tests and compare samplers.
+typedef enum _XGL_COMPARE_FUNC
+{
+ XGL_COMPARE_NEVER = 0x00000000,
+ XGL_COMPARE_LESS = 0x00000001,
+ XGL_COMPARE_EQUAL = 0x00000002,
+ XGL_COMPARE_LESS_EQUAL = 0x00000003,
+ XGL_COMPARE_GREATER = 0x00000004,
+ XGL_COMPARE_NOT_EQUAL = 0x00000005,
+ XGL_COMPARE_GREATER_EQUAL = 0x00000006,
+ XGL_COMPARE_ALWAYS = 0x00000007,
+
+ XGL_COMPARE_FUNC_BEGIN_RANGE = XGL_COMPARE_NEVER,
+ XGL_COMPARE_FUNC_END_RANGE = XGL_COMPARE_ALWAYS,
+ XGL_NUM_COMPARE_FUNC = (XGL_COMPARE_FUNC_END_RANGE - XGL_COMPARE_FUNC_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_COMPARE_FUNC)
+} XGL_COMPARE_FUNC;
+
+// Polygon rasterization fill mode.
+typedef enum _XGL_FILL_MODE
+{
+ XFL_FILL_POINTS = 0x00000000, // historical "XFL" typo; kept for source compatibility
+ XGL_FILL_POINTS = XFL_FILL_POINTS, // correctly-prefixed alias (same value)
+ XGL_FILL_WIREFRAME = 0x00000001,
+ XGL_FILL_SOLID = 0x00000002,
+
+ // Fixed: BEGIN/END were swapped (BEGIN was SOLID, END was POINTS), which
+ // made XGL_NUM_FILL_MODE evaluate to -1 instead of 3 and broke any
+ // range-based iteration/validation over this enum.
+ XGL_FILL_MODE_BEGIN_RANGE = XGL_FILL_POINTS,
+ XGL_FILL_MODE_END_RANGE = XGL_FILL_SOLID,
+ XGL_NUM_FILL_MODE = (XGL_FILL_MODE_END_RANGE - XGL_FILL_MODE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_FILL_MODE)
+} XGL_FILL_MODE;
+
+// Face-culling selection.
+typedef enum _XGL_CULL_MODE
+{
+ XGL_CULL_NONE = 0x00000000,
+ XGL_CULL_FRONT = 0x00000001,
+ XGL_CULL_BACK = 0x00000002,
+ XGL_CULL_FRONT_AND_BACK = 0x00000003,
+
+ XGL_CULL_MODE_BEGIN_RANGE = XGL_CULL_NONE,
+ XGL_CULL_MODE_END_RANGE = XGL_CULL_FRONT_AND_BACK,
+ XGL_NUM_CULL_MODE = (XGL_CULL_MODE_END_RANGE - XGL_CULL_MODE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_CULL_MODE)
+} XGL_CULL_MODE;
+
+// Winding order that defines the front face.
+typedef enum _XGL_FACE_ORIENTATION
+{
+ XGL_FRONT_FACE_CCW = 0x00000000,
+ XGL_FRONT_FACE_CW = 0x00000001,
+
+ XGL_FACE_ORIENTATION_BEGIN_RANGE = XGL_FRONT_FACE_CCW,
+ XGL_FACE_ORIENTATION_END_RANGE = XGL_FRONT_FACE_CW,
+ XGL_NUM_FACE_ORIENTATION = (XGL_FACE_ORIENTATION_END_RANGE - XGL_FACE_ORIENTATION_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_FACE_ORIENTATION)
+} XGL_FACE_ORIENTATION;
+
+// Blend factors (multipliers for source/destination color and alpha).
+typedef enum _XGL_BLEND
+{
+ XGL_BLEND_ZERO = 0x00000000,
+ XGL_BLEND_ONE = 0x00000001,
+ XGL_BLEND_SRC_COLOR = 0x00000002,
+ XGL_BLEND_ONE_MINUS_SRC_COLOR = 0x00000003,
+ XGL_BLEND_DEST_COLOR = 0x00000004,
+ XGL_BLEND_ONE_MINUS_DEST_COLOR = 0x00000005,
+ XGL_BLEND_SRC_ALPHA = 0x00000006,
+ XGL_BLEND_ONE_MINUS_SRC_ALPHA = 0x00000007,
+ XGL_BLEND_DEST_ALPHA = 0x00000008,
+ XGL_BLEND_ONE_MINUS_DEST_ALPHA = 0x00000009,
+ XGL_BLEND_CONSTANT_COLOR = 0x0000000a,
+ XGL_BLEND_ONE_MINUS_CONSTANT_COLOR = 0x0000000b,
+ XGL_BLEND_CONSTANT_ALPHA = 0x0000000c,
+ XGL_BLEND_ONE_MINUS_CONSTANT_ALPHA = 0x0000000d,
+ XGL_BLEND_SRC_ALPHA_SATURATE = 0x0000000e,
+ XGL_BLEND_SRC1_COLOR = 0x0000000f,
+ XGL_BLEND_ONE_MINUS_SRC1_COLOR = 0x00000010,
+ XGL_BLEND_SRC1_ALPHA = 0x00000011,
+ XGL_BLEND_ONE_MINUS_SRC1_ALPHA = 0x00000012,
+
+ XGL_BLEND_BEGIN_RANGE = XGL_BLEND_ZERO,
+ XGL_BLEND_END_RANGE = XGL_BLEND_ONE_MINUS_SRC1_ALPHA,
+ XGL_NUM_BLEND = (XGL_BLEND_END_RANGE - XGL_BLEND_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_BLEND)
+} XGL_BLEND;
+
+// Blend equation combining the weighted source and destination terms.
+typedef enum _XGL_BLEND_FUNC
+{
+ XGL_BLEND_FUNC_ADD = 0x00000000,
+ XGL_BLEND_FUNC_SUBTRACT = 0x00000001,
+ XGL_BLEND_FUNC_REVERSE_SUBTRACT = 0x00000002,
+ XGL_BLEND_FUNC_MIN = 0x00000003,
+ XGL_BLEND_FUNC_MAX = 0x00000004,
+
+ XGL_BLEND_FUNC_BEGIN_RANGE = XGL_BLEND_FUNC_ADD,
+ XGL_BLEND_FUNC_END_RANGE = XGL_BLEND_FUNC_MAX,
+ XGL_NUM_BLEND_FUNC = (XGL_BLEND_FUNC_END_RANGE - XGL_BLEND_FUNC_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_BLEND_FUNC)
+} XGL_BLEND_FUNC;
+
+// Stencil-buffer update operations.
+typedef enum _XGL_STENCIL_OP
+{
+ XGL_STENCIL_OP_KEEP = 0x00000000,
+ XGL_STENCIL_OP_ZERO = 0x00000001,
+ XGL_STENCIL_OP_REPLACE = 0x00000002,
+ XGL_STENCIL_OP_INC_CLAMP = 0x00000003,
+ XGL_STENCIL_OP_DEC_CLAMP = 0x00000004,
+ XGL_STENCIL_OP_INVERT = 0x00000005,
+ XGL_STENCIL_OP_INC_WRAP = 0x00000006,
+ XGL_STENCIL_OP_DEC_WRAP = 0x00000007,
+
+ XGL_STENCIL_OP_BEGIN_RANGE = XGL_STENCIL_OP_KEEP,
+ XGL_STENCIL_OP_END_RANGE = XGL_STENCIL_OP_DEC_WRAP,
+ XGL_NUM_STENCIL_OP = (XGL_STENCIL_OP_END_RANGE - XGL_STENCIL_OP_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_STENCIL_OP)
+} XGL_STENCIL_OP;
+
+// Framebuffer logical (raster) operations.
+typedef enum _XGL_LOGIC_OP
+{
+ XGL_LOGIC_OP_COPY = 0x00000000,
+ XGL_LOGIC_OP_CLEAR = 0x00000001,
+ XGL_LOGIC_OP_AND = 0x00000002,
+ XGL_LOGIC_OP_AND_REVERSE = 0x00000003,
+ XGL_LOGIC_OP_AND_INVERTED = 0x00000004,
+ XGL_LOGIC_OP_NOOP = 0x00000005,
+ XGL_LOGIC_OP_XOR = 0x00000006,
+ XGL_LOGIC_OP_OR = 0x00000007,
+ XGL_LOGIC_OP_NOR = 0x00000008,
+ XGL_LOGIC_OP_EQUIV = 0x00000009,
+ XGL_LOGIC_OP_INVERT = 0x0000000a,
+ XGL_LOGIC_OP_OR_REVERSE = 0x0000000b,
+ XGL_LOGIC_OP_COPY_INVERTED = 0x0000000c,
+ XGL_LOGIC_OP_OR_INVERTED = 0x0000000d,
+ XGL_LOGIC_OP_NAND = 0x0000000e,
+ XGL_LOGIC_OP_SET = 0x0000000f,
+
+ XGL_LOGIC_OP_BEGIN_RANGE = XGL_LOGIC_OP_COPY,
+ XGL_LOGIC_OP_END_RANGE = XGL_LOGIC_OP_SET,
+ XGL_NUM_LOGIC_OP = (XGL_LOGIC_OP_END_RANGE - XGL_LOGIC_OP_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_LOGIC_OP)
+} XGL_LOGIC_OP;
+
+// GPU atomic operations, in 32-bit and 64-bit variants.
+typedef enum _XGL_ATOMIC_OP
+{
+ XGL_ATOMIC_ADD_INT32 = 0x00000000,
+ XGL_ATOMIC_SUB_INT32 = 0x00000001,
+ XGL_ATOMIC_MIN_UINT32 = 0x00000002,
+ XGL_ATOMIC_MAX_UINT32 = 0x00000003,
+ XGL_ATOMIC_MIN_SINT32 = 0x00000004,
+ XGL_ATOMIC_MAX_SINT32 = 0x00000005,
+ XGL_ATOMIC_AND_INT32 = 0x00000006,
+ XGL_ATOMIC_OR_INT32 = 0x00000007,
+ XGL_ATOMIC_XOR_INT32 = 0x00000008,
+ XGL_ATOMIC_INC_UINT32 = 0x00000009,
+ XGL_ATOMIC_DEC_UINT32 = 0x0000000a,
+ XGL_ATOMIC_ADD_INT64 = 0x0000000b,
+ XGL_ATOMIC_SUB_INT64 = 0x0000000c,
+ XGL_ATOMIC_MIN_UINT64 = 0x0000000d,
+ XGL_ATOMIC_MAX_UINT64 = 0x0000000e,
+ XGL_ATOMIC_MIN_SINT64 = 0x0000000f,
+ XGL_ATOMIC_MAX_SINT64 = 0x00000010,
+ XGL_ATOMIC_AND_INT64 = 0x00000011,
+ XGL_ATOMIC_OR_INT64 = 0x00000012,
+ XGL_ATOMIC_XOR_INT64 = 0x00000013,
+ XGL_ATOMIC_INC_UINT64 = 0x00000014,
+ XGL_ATOMIC_DEC_UINT64 = 0x00000015,
+
+ XGL_ATOMIC_OP_BEGIN_RANGE = XGL_ATOMIC_ADD_INT32,
+ XGL_ATOMIC_OP_END_RANGE = XGL_ATOMIC_DEC_UINT64,
+ XGL_NUM_ATOMIC_OP = (XGL_ATOMIC_OP_END_RANGE - XGL_ATOMIC_OP_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_ATOMIC_OP)
+} XGL_ATOMIC_OP;
+
+// Categories passed to the application's system-memory allocation callback.
+typedef enum _XGL_SYSTEM_ALLOC_TYPE
+{
+ XGL_SYSTEM_ALLOC_API_OBJECT = 0x00000000,
+ XGL_SYSTEM_ALLOC_INTERNAL = 0x00000001,
+ XGL_SYSTEM_ALLOC_INTERNAL_TEMP = 0x00000002,
+ XGL_SYSTEM_ALLOC_INTERNAL_SHADER = 0x00000003,
+ XGL_SYSTEM_ALLOC_DEBUG = 0x00000004,
+
+ XGL_SYSTEM_ALLOC_BEGIN_RANGE = XGL_SYSTEM_ALLOC_API_OBJECT,
+ XGL_SYSTEM_ALLOC_END_RANGE = XGL_SYSTEM_ALLOC_DEBUG,
+ XGL_NUM_SYSTEM_ALLOC_TYPE = (XGL_SYSTEM_ALLOC_END_RANGE - XGL_SYSTEM_ALLOC_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_SYSTEM_ALLOC_TYPE)
+} XGL_SYSTEM_ALLOC_TYPE;
+
+// Physical location/kind of a GPU memory heap.
+typedef enum _XGL_HEAP_MEMORY_TYPE
+{
+ XGL_HEAP_MEMORY_OTHER = 0x00000000,
+ XGL_HEAP_MEMORY_LOCAL = 0x00000001,
+ XGL_HEAP_MEMORY_REMOTE = 0x00000002,
+ XGL_HEAP_MEMORY_EMBEDDED = 0x00000003,
+
+ XGL_HEAP_MEMORY_BEGIN_RANGE = XGL_HEAP_MEMORY_OTHER,
+ XGL_HEAP_MEMORY_END_RANGE = XGL_HEAP_MEMORY_EMBEDDED,
+ XGL_NUM_HEAP_MEMORY_TYPE = (XGL_HEAP_MEMORY_END_RANGE - XGL_HEAP_MEMORY_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_HEAP_MEMORY_TYPE)
+} XGL_HEAP_MEMORY_TYPE;
+
+// Kind of physical GPU.
+typedef enum _XGL_PHYSICAL_GPU_TYPE
+{
+ XGL_GPU_TYPE_OTHER = 0x00000000,
+ XGL_GPU_TYPE_INTEGRATED = 0x00000001,
+ XGL_GPU_TYPE_DISCRETE = 0x00000002,
+ XGL_GPU_TYPE_VIRTUAL = 0x00000003,
+
+ XGL_PHYSICAL_GPU_TYPE_BEGIN_RANGE = XGL_GPU_TYPE_OTHER,
+ XGL_PHYSICAL_GPU_TYPE_END_RANGE = XGL_GPU_TYPE_VIRTUAL,
+ XGL_NUM_PHYSICAL_GPU_TYPE = (XGL_PHYSICAL_GPU_TYPE_END_RANGE - XGL_PHYSICAL_GPU_TYPE_BEGIN_RANGE + 1),
+ XGL_MAX_ENUM(_XGL_PHYSICAL_GPU_TYPE)
+} XGL_PHYSICAL_GPU_TYPE;
+
+typedef enum _XGL_PHYSICAL_GPU_INFO_TYPE
+{
+ // Info type for xglGetGpuInfo()
+ XGL_INFO_TYPE_PHYSICAL_GPU_PROPERTIES = 0x00000000,
+ XGL_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE = 0x00000001,
+ XGL_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES = 0x00000002,
+ XGL_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES = 0x00000003,
+
+ XGL_MAX_ENUM(_XGL_PHYSICAL_GPU_INFO_TYPE)
+} XGL_PHYSICAL_GPU_INFO_TYPE;
+
+typedef enum _XGL_MEMORY_HEAP_INFO_TYPE
+{
+ // Info type for xglGetMemoryHeapInfo()
+ XGL_INFO_TYPE_MEMORY_HEAP_PROPERTIES = 0x00000000,
+
+ XGL_MAX_ENUM(_XGL_MEMORY_HEAP_INFO_TYPE)
+} XGL_MEMORY_HEAP_INFO_TYPE;
+
+typedef enum _XGL_FORMAT_INFO_TYPE
+{
+ // Info type for xglGetFormatInfo() (comment previously misspelled "xlg")
+ XGL_INFO_TYPE_FORMAT_PROPERTIES = 0x00000000,
+
+ XGL_MAX_ENUM(_XGL_FORMAT_INFO_TYPE)
+} XGL_FORMAT_INFO_TYPE;
+
+typedef enum _XGL_SUBRESOURCE_INFO_TYPE
+{
+ // Info type for xglGetImageSubresourceInfo()
+ XGL_INFO_TYPE_SUBRESOURCE_LAYOUT = 0x00000000,
+
+ XGL_MAX_ENUM(_XGL_SUBRESOURCE_INFO_TYPE)
+} XGL_SUBRESOURCE_INFO_TYPE;
+
+typedef enum _XGL_OBJECT_INFO_TYPE
+{
+ // Info type for xglGetObjectInfo()
+ XGL_INFO_TYPE_MEMORY_REQUIREMENTS = 0x00000000,
+
+ XGL_MAX_ENUM(_XGL_OBJECT_INFO_TYPE)
+} XGL_OBJECT_INFO_TYPE;
+
+// Runtime validation verbosity level (0 = least, 4 = most).
+typedef enum _XGL_VALIDATION_LEVEL
+{
+ XGL_VALIDATION_LEVEL_0 = 0x00000000,
+ XGL_VALIDATION_LEVEL_1 = 0x00000001,
+ XGL_VALIDATION_LEVEL_2 = 0x00000002,
+ XGL_VALIDATION_LEVEL_3 = 0x00000003,
+ XGL_VALIDATION_LEVEL_4 = 0x00000004,
+
+ XGL_VALIDATION_LEVEL_BEGIN_RANGE = XGL_VALIDATION_LEVEL_0,
+ XGL_VALIDATION_LEVEL_END_RANGE = XGL_VALIDATION_LEVEL_4,
+ XGL_NUM_VALIDATION_LEVEL = (XGL_VALIDATION_LEVEL_END_RANGE - XGL_VALIDATION_LEVEL_BEGIN_RANGE + 1),
+
+ XGL_MAX_ENUM(_XGL_VALIDATION_LEVEL)
+} XGL_VALIDATION_LEVEL;
+
+// ------------------------------------------------------------------------------------------------
+// Error and return codes
+
+typedef enum _XGL_RESULT_CODE
+{
+ // Return codes for successful operation execution (>= 0)
+ XGL_SUCCESS = 0x0000000,
+ XGL_UNSUPPORTED = 0x0000001,
+ XGL_NOT_READY = 0x0000002,
+ XGL_TIMEOUT = 0x0000003,
+ XGL_EVENT_SET = 0x0000004,
+ XGL_EVENT_RESET = 0x0000005,
+
+ // Error codes (negative values)
+ XGL_ERROR_UNKNOWN = -(0x00000001),
+ XGL_ERROR_UNAVAILABLE = -(0x00000002),
+ XGL_ERROR_INITIALIZATION_FAILED = -(0x00000003),
+ XGL_ERROR_OUT_OF_MEMORY = -(0x00000004),
+ XGL_ERROR_OUT_OF_GPU_MEMORY = -(0x00000005),
+ XGL_ERROR_DEVICE_ALREADY_CREATED = -(0x00000006),
+ XGL_ERROR_DEVICE_LOST = -(0x00000007),
+ XGL_ERROR_INVALID_POINTER = -(0x00000008),
+ XGL_ERROR_INVALID_VALUE = -(0x00000009),
+ XGL_ERROR_INVALID_HANDLE = -(0x0000000A),
+ XGL_ERROR_INVALID_ORDINAL = -(0x0000000B),
+ XGL_ERROR_INVALID_MEMORY_SIZE = -(0x0000000C),
+ XGL_ERROR_INVALID_EXTENSION = -(0x0000000D),
+ XGL_ERROR_INVALID_FLAGS = -(0x0000000E),
+ XGL_ERROR_INVALID_ALIGNMENT = -(0x0000000F),
+ XGL_ERROR_INVALID_FORMAT = -(0x00000010),
+ XGL_ERROR_INVALID_IMAGE = -(0x00000011),
+ XGL_ERROR_INVALID_DESCRIPTOR_SET_DATA = -(0x00000012),
+ XGL_ERROR_INVALID_QUEUE_TYPE = -(0x00000013),
+ XGL_ERROR_INVALID_OBJECT_TYPE = -(0x00000014),
+ XGL_ERROR_UNSUPPORTED_SHADER_IL_VERSION = -(0x00000015),
+ XGL_ERROR_BAD_SHADER_CODE = -(0x00000016),
+ XGL_ERROR_BAD_PIPELINE_DATA = -(0x00000017),
+ XGL_ERROR_TOO_MANY_MEMORY_REFERENCES = -(0x00000018),
+ XGL_ERROR_NOT_MAPPABLE = -(0x00000019),
+ XGL_ERROR_MEMORY_MAP_FAILED = -(0x0000001A),
+ XGL_ERROR_MEMORY_UNMAP_FAILED = -(0x0000001B),
+ XGL_ERROR_INCOMPATIBLE_DEVICE = -(0x0000001C),
+ XGL_ERROR_INCOMPATIBLE_DRIVER = -(0x0000001D),
+ XGL_ERROR_INCOMPLETE_COMMAND_BUFFER = -(0x0000001E),
+ XGL_ERROR_BUILDING_COMMAND_BUFFER = -(0x0000001F),
+ XGL_ERROR_MEMORY_NOT_BOUND = -(0x00000020),
+ XGL_ERROR_INCOMPATIBLE_QUEUE = -(0x00000021),
+ XGL_ERROR_NOT_SHAREABLE = -(0x00000022),
+} XGL_RESULT;
+
+// ------------------------------------------------------------------------------------------------
+// XGL format definitions
+
+typedef enum _XGL_CHANNEL_FORMAT
+{
+ XGL_CH_FMT_UNDEFINED = 0,
+ XGL_CH_FMT_R4G4 = 1,
+ XGL_CH_FMT_R4G4B4A4 = 2,
+ XGL_CH_FMT_R5G6B5 = 3,
+ XGL_CH_FMT_B5G6R5 = 4,
+ XGL_CH_FMT_R5G5B5A1 = 5,
+ XGL_CH_FMT_R8 = 6,
+ XGL_CH_FMT_R8G8 = 7,
+ XGL_CH_FMT_R8G8B8A8 = 8,
+ XGL_CH_FMT_B8G8R8A8 = 9,
+ XGL_CH_FMT_R10G11B11 = 10,
+ XGL_CH_FMT_R11G11B10 = 11,
+ XGL_CH_FMT_R10G10B10A2 = 12,
+ XGL_CH_FMT_R16 = 13,
+ XGL_CH_FMT_R16G16 = 14,
+ XGL_CH_FMT_R16G16B16A16 = 15,
+ XGL_CH_FMT_R32 = 16,
+ XGL_CH_FMT_R32G32 = 17,
+ XGL_CH_FMT_R32G32B32 = 18,
+ XGL_CH_FMT_R32G32B32A32 = 19,
+ XGL_CH_FMT_R16G8 = 20,
+ XGL_CH_FMT_R32G8 = 21,
+ XGL_CH_FMT_R9G9B9E5 = 22,
+ XGL_CH_FMT_BC1 = 23,
+ XGL_CH_FMT_BC2 = 24,
+ XGL_CH_FMT_BC3 = 25,
+ XGL_CH_FMT_BC4 = 26,
+ XGL_CH_FMT_BC5 = 27,
+ XGL_CH_FMT_BC6U = 28,
+ XGL_CH_FMT_BC6S = 29,
+ XGL_CH_FMT_BC7 = 30,
+ XGL_MAX_CH_FMT = XGL_CH_FMT_BC7,
+ XGL_MAX_ENUM(_XGL_CHANNEL_FORMAT)
+} XGL_CHANNEL_FORMAT;
+
+// Numeric-interpretation half of XGL_FORMAT: how the channel bits are read
+// (normalized, integer, float, sRGB-encoded, or depth/stencil).
+typedef enum _XGL_NUM_FORMAT
+{
+    XGL_NUM_FMT_UNDEFINED = 0,
+    XGL_NUM_FMT_UNORM = 1,
+    XGL_NUM_FMT_SNORM = 2,
+    XGL_NUM_FMT_UINT = 3,
+    XGL_NUM_FMT_SINT = 4,
+    XGL_NUM_FMT_FLOAT = 5,
+    XGL_NUM_FMT_SRGB = 6,
+    XGL_NUM_FMT_DS = 7,    // Depth/stencil
+    XGL_MAX_NUM_FMT = XGL_NUM_FMT_DS,
+    XGL_MAX_ENUM(_XGL_NUM_FORMAT)
+} XGL_NUM_FORMAT;
+
+// Selects which vertex of a primitive supplies flat-shaded attributes.
+typedef enum _XGL_PROVOKING_VERTEX_CONVENTION
+{
+    XGL_PROVOKING_VERTEX_FIRST = 0,
+    XGL_PROVOKING_VERTEX_LAST = 1,
+
+    // Range metadata for validation/iteration over the valid values above.
+    XGL_PROVOKING_VERTEX_BEGIN_RANGE = XGL_PROVOKING_VERTEX_FIRST,
+    XGL_PROVOKING_VERTEX_END_RANGE = XGL_PROVOKING_VERTEX_LAST,
+    XGL_NUM_PROVOKING_VERTEX_CONVENTIONS = (XGL_PROVOKING_VERTEX_END_RANGE - XGL_PROVOKING_VERTEX_BEGIN_RANGE + 1),
+    XGL_MAX_ENUM(_XGL_PROVOKING_VERTEX_CONVENTION)
+} XGL_PROVOKING_VERTEX_CONVENTION;
+
+// A complete format: channel layout plus numeric interpretation.
+typedef struct _XGL_FORMAT
+{
+    XGL_CHANNEL_FORMAT    channelFormat;
+    XGL_NUM_FORMAT        numericFormat;
+} XGL_FORMAT;
+
+// Shader stage enumerant
+typedef enum _XGL_PIPELINE_SHADER_STAGE
+{
+    XGL_SHADER_STAGE_VERTEX = 0,
+    XGL_SHADER_STAGE_TESS_CONTROL = 1,
+    XGL_SHADER_STAGE_TESS_EVALUATION = 2,
+    XGL_SHADER_STAGE_GEOMETRY = 3,
+    XGL_SHADER_STAGE_FRAGMENT = 4,
+    XGL_SHADER_STAGE_COMPUTE = 5,
+    XGL_MAX_ENUM(_XGL_PIPELINE_SHADER_STAGE)
+} XGL_PIPELINE_SHADER_STAGE;
+
+// Structure type enumerant
+// Identifies the concrete type carried in each structure's leading sType
+// member, so pNext chains of structures can be walked generically.
+typedef enum _XGL_STRUCTURE_TYPE
+{
+    XGL_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+    XGL_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 1,
+    XGL_STRUCTURE_TYPE_MEMORY_ALLOC_INFO = 2,
+    // NOTE(review): value 3 is skipped here — presumably reserved or removed
+    // in this header revision; confirm before reusing it.
+    XGL_STRUCTURE_TYPE_MEMORY_OPEN_INFO = 4,
+    XGL_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO = 5,
+    XGL_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO = 6,
+    XGL_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO = 7,
+    XGL_STRUCTURE_TYPE_MEMORY_STATE_TRANSITION = 8,
+    XGL_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 9,
+    XGL_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO = 10,
+    XGL_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO = 11,
+    XGL_STRUCTURE_TYPE_SHADER_CREATE_INFO = 12,
+    XGL_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 13,
+    XGL_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 14,
+    XGL_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO = 15,
+    XGL_STRUCTURE_TYPE_RASTER_STATE_CREATE_INFO = 16,
+    XGL_STRUCTURE_TYPE_MSAA_STATE_CREATE_INFO = 17,
+    XGL_STRUCTURE_TYPE_COLOR_BLEND_STATE_CREATE_INFO = 18,
+    XGL_STRUCTURE_TYPE_DEPTH_STENCIL_STATE_CREATE_INFO = 19,
+    XGL_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO = 20,
+    XGL_STRUCTURE_TYPE_EVENT_CREATE_INFO = 21,
+    XGL_STRUCTURE_TYPE_FENCE_CREATE_INFO = 22,
+    XGL_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 23,
+    XGL_STRUCTURE_TYPE_SEMAPHORE_OPEN_INFO = 24,
+    XGL_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 25,
+    XGL_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 26,
+    XGL_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 27,
+    // NOTE(review): IA state intentionally lacks the PIPELINE_ prefix used by
+    // the DB/CB/RS/TESS entries below — inconsistent but part of the ABI.
+    XGL_STRUCTURE_TYPE_IA_STATE_CREATE_INFO = 28,
+    XGL_STRUCTURE_TYPE_PIPELINE_DB_STATE_CREATE_INFO = 29,
+    XGL_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO = 30,
+    XGL_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO = 31,
+    XGL_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO = 32,
+    XGL_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 33,
+    XGL_MAX_ENUM(_XGL_STRUCTURE_TYPE)
+} XGL_STRUCTURE_TYPE;
+
+// ------------------------------------------------------------------------------------------------
+// Flags
+// These enums define individual bits; values are combined into an XGL_FLAGS
+// bitmask in the structures that reference them.
+
+// Device creation flags
+typedef enum _XGL_DEVICE_CREATE_FLAGS
+{
+    XGL_DEVICE_CREATE_VALIDATION_BIT = 0x00000001,
+    XGL_DEVICE_CREATE_MGPU_IQ_MATCH_BIT = 0x00000002,
+} XGL_DEVICE_CREATE_FLAGS;
+
+// Queue capabilities
+typedef enum _XGL_QUEUE_FLAGS
+{
+    XGL_QUEUE_GRAPHICS_BIT = 0x00000001,   // Queue supports graphics operations
+    XGL_QUEUE_COMPUTE_BIT = 0x00000002,   // Queue supports compute operations
+    XGL_QUEUE_DMA_BIT = 0x00000004,   // Queue supports DMA operations
+    XGL_QUEUE_EXTENDED_BIT = 0x80000000    // Extended queue
+} XGL_QUEUE_FLAGS;
+
+// Memory heap properties
+typedef enum _XGL_MEMORY_HEAP_FLAGS
+{
+    XGL_MEMORY_HEAP_CPU_VISIBLE_BIT = 0x00000001,
+    XGL_MEMORY_HEAP_CPU_GPU_COHERENT_BIT = 0x00000002,
+    XGL_MEMORY_HEAP_CPU_UNCACHED_BIT = 0x00000004,
+    XGL_MEMORY_HEAP_CPU_WRITE_COMBINED_BIT = 0x00000008,
+    XGL_MEMORY_HEAP_HOLDS_PINNED_BIT = 0x00000010,
+    XGL_MEMORY_HEAP_SHAREABLE_BIT = 0x00000020,
+} XGL_MEMORY_HEAP_FLAGS;
+
+// Memory allocation flags
+typedef enum _XGL_MEMORY_ALLOC_FLAGS
+{
+    XGL_MEMORY_ALLOC_VIRTUAL_BIT = 0x00000001,
+    XGL_MEMORY_ALLOC_SHAREABLE_BIT = 0x00000002,
+} XGL_MEMORY_ALLOC_FLAGS;
+
+// Image usage flags
+typedef enum _XGL_IMAGE_USAGE_FLAGS
+{
+    XGL_IMAGE_USAGE_SHADER_ACCESS_READ_BIT = 0x00000001,
+    XGL_IMAGE_USAGE_SHADER_ACCESS_WRITE_BIT = 0x00000002,
+    XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000004,
+    XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT = 0x00000008,
+} XGL_IMAGE_USAGE_FLAGS;
+
+// Image flags
+typedef enum _XGL_IMAGE_CREATE_FLAGS
+{
+    XGL_IMAGE_CREATE_INVARIANT_DATA_BIT = 0x00000001,
+    XGL_IMAGE_CREATE_CLONEABLE_BIT = 0x00000002,
+    XGL_IMAGE_CREATE_SHAREABLE_BIT = 0x00000004,
+} XGL_IMAGE_CREATE_FLAGS;
+
+// Depth-stencil view creation flags
+typedef enum _XGL_DEPTH_STENCIL_VIEW_CREATE_FLAGS
+{
+    XGL_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_DEPTH_BIT = 0x00000001,
+    XGL_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_STENCIL_BIT = 0x00000002,
+} XGL_DEPTH_STENCIL_VIEW_CREATE_FLAGS;
+
+// Pipeline creation flags
+typedef enum _XGL_PIPELINE_CREATE_FLAGS
+{
+    XGL_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+} XGL_PIPELINE_CREATE_FLAGS;
+
+// Semaphore creation flags
+typedef enum _XGL_SEMAPHORE_CREATE_FLAGS
+{
+    XGL_SEMAPHORE_CREATE_SHAREABLE_BIT = 0x00000001,
+} XGL_SEMAPHORE_CREATE_FLAGS;
+
+// Memory reference flags
+typedef enum _XGL_MEMORY_REF_FLAGS
+{
+    XGL_MEMORY_REF_READ_ONLY_BIT = 0x00000001,
+} XGL_MEMORY_REF_FLAGS;
+
+// Format capability flags
+// Reported per-format in XGL_FORMAT_PROPERTIES (separately for linear and
+// optimal tiling).
+typedef enum _XGL_FORMAT_FEATURE_FLAGS
+{
+    XGL_FORMAT_IMAGE_SHADER_READ_BIT = 0x00000001,
+    XGL_FORMAT_IMAGE_SHADER_WRITE_BIT = 0x00000002,
+    XGL_FORMAT_IMAGE_COPY_BIT = 0x00000004,
+    XGL_FORMAT_MEMORY_SHADER_ACCESS_BIT = 0x00000008,
+    XGL_FORMAT_COLOR_ATTACHMENT_WRITE_BIT = 0x00000010,
+    XGL_FORMAT_COLOR_ATTACHMENT_BLEND_BIT = 0x00000020,
+    XGL_FORMAT_DEPTH_ATTACHMENT_BIT = 0x00000040,
+    XGL_FORMAT_STENCIL_ATTACHMENT_BIT = 0x00000080,
+    XGL_FORMAT_MSAA_ATTACHMENT_BIT = 0x00000100,
+    XGL_FORMAT_CONVERSION_BIT = 0x00000200,
+} XGL_FORMAT_FEATURE_FLAGS;
+
+// Query flags
+typedef enum _XGL_QUERY_CONTROL_FLAGS
+{
+    XGL_QUERY_IMPRECISE_DATA_BIT = 0x00000001,
+} XGL_QUERY_CONTROL_FLAGS;
+
+// GPU compatibility flags
+typedef enum _XGL_GPU_COMPATIBILITY_FLAGS
+{
+    XGL_GPU_COMPAT_ASIC_FEATURES_BIT = 0x00000001,
+    XGL_GPU_COMPAT_IQ_MATCH_BIT = 0x00000002,
+    XGL_GPU_COMPAT_PEER_TRANSFER_BIT = 0x00000004,
+    XGL_GPU_COMPAT_SHARED_MEMORY_BIT = 0x00000008,
+    XGL_GPU_COMPAT_SHARED_SYNC_BIT = 0x00000010,
+    XGL_GPU_COMPAT_SHARED_GPU0_DISPLAY_BIT = 0x00000020,
+    XGL_GPU_COMPAT_SHARED_GPU1_DISPLAY_BIT = 0x00000040,
+} XGL_GPU_COMPATIBILITY_FLAGS;
+
+// Command buffer building flags
+typedef enum _XGL_CMD_BUFFER_BUILD_FLAGS
+{
+    XGL_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT = 0x00000001,
+    XGL_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT = 0x00000002,
+    XGL_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT = 0x00000004,
+    XGL_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT = 0x00000008,
+} XGL_CMD_BUFFER_BUILD_FLAGS;
+
+// ------------------------------------------------------------------------------------------------
+// XGL structures
+
+// Basic 2D/3D geometry helper types used throughout the API.
+
+typedef struct _XGL_OFFSET2D
+{
+    XGL_INT                                 x;
+    XGL_INT                                 y;
+} XGL_OFFSET2D;
+
+typedef struct _XGL_OFFSET3D
+{
+    XGL_INT                                 x;
+    XGL_INT                                 y;
+    XGL_INT                                 z;
+} XGL_OFFSET3D;
+
+typedef struct _XGL_EXTENT2D
+{
+    XGL_INT                                 width;
+    XGL_INT                                 height;
+} XGL_EXTENT2D;
+
+typedef struct _XGL_EXTENT3D
+{
+    XGL_INT                                 width;
+    XGL_INT                                 height;
+    XGL_INT                                 depth;
+} XGL_EXTENT3D;
+
+// Viewport transform: origin/size in framebuffer space, depth range [minDepth, maxDepth].
+typedef struct _XGL_VIEWPORT
+{
+    XGL_FLOAT                               originX;
+    XGL_FLOAT                               originY;
+    XGL_FLOAT                               width;
+    XGL_FLOAT                               height;
+    XGL_FLOAT                               minDepth;
+    XGL_FLOAT                               maxDepth;
+} XGL_VIEWPORT;
+
+// Rectangle = offset + extent.
+typedef struct _XGL_RECT
+{
+    XGL_OFFSET2D                            offset;
+    XGL_EXTENT2D                            extent;
+} XGL_RECT;
+
+// Static properties of a physical GPU, returned via xglGetGpuInfo.
+typedef struct _XGL_PHYSICAL_GPU_PROPERTIES
+{
+    XGL_SIZE                                structSize;             // Size of structure in bytes
+    XGL_UINT32                              apiVersion;
+    XGL_UINT32                              driverVersion;
+    XGL_UINT32                              vendorId;
+    XGL_UINT32                              deviceId;
+    XGL_PHYSICAL_GPU_TYPE                   gpuType;
+    XGL_CHAR                                gpuName[XGL_MAX_PHYSICAL_GPU_NAME];
+    XGL_UINT                                maxMemRefsPerSubmission;
+    XGL_GPU_SIZE                            virtualMemPageSize;
+    XGL_GPU_SIZE                            maxInlineMemoryUpdateSize;
+    XGL_UINT                                maxBoundDescriptorSets;
+    XGL_UINT                                maxThreadGroupSize;
+    XGL_UINT64                              timestampFrequency;     // Ticks per second — TODO confirm units
+    XGL_BOOL                                multiColorAttachmentClears;
+} XGL_PHYSICAL_GPU_PROPERTIES;
+
+// Relative performance ratings of a physical GPU.
+typedef struct _XGL_PHYSICAL_GPU_PERFORMANCE
+{
+    XGL_FLOAT                               maxGpuClock;
+    XGL_FLOAT                               aluPerClock;
+    XGL_FLOAT                               texPerClock;
+    XGL_FLOAT                               primsPerClock;
+    XGL_FLOAT                               pixelsPerClock;
+} XGL_PHYSICAL_GPU_PERFORMANCE;
+
+// Result of xglGetMultiGpuCompatibility for a GPU pair.
+typedef struct _XGL_GPU_COMPATIBILITY_INFO
+{
+    XGL_FLAGS                               compatibilityFlags; // XGL_GPU_COMPATIBILITY_FLAGS
+} XGL_GPU_COMPATIBILITY_INFO;
+
+// Application identification passed to xglInitAndEnumerateGpus.
+typedef struct _XGL_APPLICATION_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;              // Type of structure. Should be XGL_STRUCTURE_TYPE_APPLICATION_INFO
+    XGL_VOID*                               pNext;              // Next structure in chain
+    const XGL_CHAR*                         pAppName;
+    XGL_UINT32                              appVersion;
+    const XGL_CHAR*                         pEngineName;
+    XGL_UINT32                              engineVersion;
+    XGL_UINT32                              apiVersion;
+} XGL_APPLICATION_INFO;
+
+// Application-supplied host allocation callback; returns a block of `size`
+// bytes with the requested alignment.
+typedef XGL_VOID* (XGLAPI *XGL_ALLOC_FUNCTION)(
+    XGL_VOID*                               pUserData,
+    XGL_SIZE                                size,
+    XGL_SIZE                                alignment,
+    XGL_SYSTEM_ALLOC_TYPE                   allocType);
+
+// Application-supplied host free callback for memory from XGL_ALLOC_FUNCTION.
+typedef XGL_VOID (XGLAPI *XGL_FREE_FUNCTION)(
+    XGL_VOID*                               pUserData,
+    XGL_VOID*                               pMem);
+
+// Pair of host allocation callbacks plus an opaque user pointer that is
+// passed back on every call.
+typedef struct _XGL_ALLOC_CALLBACKS
+{
+    XGL_VOID*                               pUserData;
+    XGL_ALLOC_FUNCTION                      pfnAlloc;
+    XGL_FREE_FUNCTION                       pfnFree;
+} XGL_ALLOC_CALLBACKS;
+
+// Requests `queueCount` queues from the queue family `queueNodeIndex`.
+typedef struct _XGL_DEVICE_QUEUE_CREATE_INFO
+{
+    XGL_UINT                                queueNodeIndex;
+    XGL_UINT                                queueCount;
+} XGL_DEVICE_QUEUE_CREATE_INFO;
+
+// Parameters for xglCreateDevice.
+typedef struct _XGL_DEVICE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Should be XGL_STRUCTURE_TYPE_DEVICE_CREATE_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_UINT                                queueRecordCount;           // Number of entries in pRequestedQueues
+    const XGL_DEVICE_QUEUE_CREATE_INFO*     pRequestedQueues;
+    XGL_UINT                                extensionCount;             // Number of entries in ppEnabledExtensionNames
+    const XGL_CHAR*const*                   ppEnabledExtensionNames;
+    XGL_VALIDATION_LEVEL                    maxValidationLevel;
+    XGL_FLAGS                               flags;                      // XGL_DEVICE_CREATE_FLAGS
+} XGL_DEVICE_CREATE_INFO;
+
+// Per-queue-family capabilities, returned via xglGetGpuInfo.
+typedef struct _XGL_PHYSICAL_GPU_QUEUE_PROPERTIES
+{
+    XGL_SIZE                                structSize;                 // Size of structure in bytes
+    XGL_FLAGS                               queueFlags;                 // XGL_QUEUE_FLAGS
+    XGL_UINT                                queueCount;
+    XGL_UINT                                maxAtomicCounters;
+    XGL_BOOL                                supportsTimestamps;
+} XGL_PHYSICAL_GPU_QUEUE_PROPERTIES;
+
+// Memory-subsystem capabilities of a physical GPU.
+typedef struct _XGL_PHYSICAL_GPU_MEMORY_PROPERTIES
+{
+    XGL_SIZE                                structSize;                 // Size of structure in bytes
+    XGL_BOOL                                supportsMigration;
+    XGL_BOOL                                supportsVirtualMemoryRemapping;
+    XGL_BOOL                                supportsPinning;
+} XGL_PHYSICAL_GPU_MEMORY_PROPERTIES;
+
+// Properties of one memory heap, returned via xglGetMemoryHeapInfo.
+typedef struct _XGL_MEMORY_HEAP_PROPERTIES
+{
+    XGL_SIZE                                structSize;                 // Size of structure in bytes
+    XGL_HEAP_MEMORY_TYPE                    heapMemoryType;             // XGL_HEAP_MEMORY_TYPE
+    XGL_GPU_SIZE                            heapSize;                   // Specified in bytes
+    XGL_GPU_SIZE                            pageSize;                   // Specified in bytes
+    XGL_FLAGS                               flags;                      // XGL_MEMORY_HEAP_FLAGS
+    XGL_FLOAT                               gpuReadPerfRating;
+    XGL_FLOAT                               gpuWritePerfRating;
+    XGL_FLOAT                               cpuReadPerfRating;
+    XGL_FLOAT                               cpuWritePerfRating;
+} XGL_MEMORY_HEAP_PROPERTIES;
+
+// Parameters for xglAllocMemory; `heaps` lists candidate heap IDs in
+// preference order (heapCount valid entries).
+typedef struct _XGL_MEMORY_ALLOC_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_GPU_SIZE                            allocationSize;             // Size of memory allocation
+    XGL_GPU_SIZE                            alignment;
+    XGL_FLAGS                               flags;                      // XGL_MEMORY_ALLOC_FLAGS
+    XGL_UINT                                heapCount;
+    XGL_UINT                                heaps[XGL_MAX_MEMORY_HEAPS];
+    XGL_MEMORY_PRIORITY                     memPriority;
+} XGL_MEMORY_ALLOC_INFO;
+
+// Parameters for xglOpenSharedMemory.
+typedef struct _XGL_MEMORY_OPEN_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_MEMORY_OPEN_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_GPU_MEMORY                          sharedMem;
+} XGL_MEMORY_OPEN_INFO;
+
+// Parameters for xglOpenPeerMemory.
+typedef struct _XGL_PEER_MEMORY_OPEN_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_GPU_MEMORY                          originalMem;
+} XGL_PEER_MEMORY_OPEN_INFO;
+
+// One page-range remapping for xglRemapVirtualMemoryPages: maps pageCount
+// pages of virtualMem (starting at virtualStartPage) onto realMem.
+typedef struct _XGL_VIRTUAL_MEMORY_REMAP_RANGE
+{
+    XGL_GPU_MEMORY                          virtualMem;
+    XGL_GPU_SIZE                            virtualStartPage;
+    XGL_GPU_MEMORY                          realMem;
+    XGL_GPU_SIZE                            realStartPage;
+    XGL_GPU_SIZE                            pageCount;
+} XGL_VIRTUAL_MEMORY_REMAP_RANGE;
+
+// Memory requirements of an object; `heaps` lists the heapCount heap IDs the
+// object can be bound to.
+typedef struct _XGL_MEMORY_REQUIREMENTS
+{
+    XGL_GPU_SIZE                            size;                       // Specified in bytes
+    XGL_GPU_SIZE                            alignment;                  // Specified in bytes
+    XGL_UINT                                heapCount;
+    XGL_UINT                                heaps[XGL_MAX_MEMORY_HEAPS];
+} XGL_MEMORY_REQUIREMENTS;
+
+// Per-format capabilities for linear and optimal tiling (xglGetFormatInfo).
+typedef struct _XGL_FORMAT_PROPERTIES
+{
+    XGL_FLAGS                               linearTilingFeatures;       // XGL_FORMAT_FEATURE_FLAGS
+    XGL_FLAGS                               optimalTilingFeatures;      // XGL_FORMAT_FEATURE_FLAGS
+} XGL_FORMAT_PROPERTIES;
+
+// Describes a typed view of a memory range (offset/range/stride/format) for
+// attachment to a descriptor set slot.
+typedef struct _XGL_MEMORY_VIEW_ATTACH_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_GPU_MEMORY                          mem;
+    XGL_GPU_SIZE                            offset;
+    XGL_GPU_SIZE                            range;
+    XGL_GPU_SIZE                            stride;
+    XGL_FORMAT                              format;
+    XGL_MEMORY_STATE                        state;
+} XGL_MEMORY_VIEW_ATTACH_INFO;
+
+// Describes an image view plus its expected image state for attachment.
+typedef struct _XGL_IMAGE_VIEW_ATTACH_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_IMAGE_VIEW                          view;
+    XGL_IMAGE_STATE                         state;
+} XGL_IMAGE_VIEW_ATTACH_INFO;
+
+// Transitions a memory region from oldState to newState.
+typedef struct _XGL_MEMORY_STATE_TRANSITION
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_MEMORY_STATE_TRANSITION
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_GPU_MEMORY                          mem;
+    XGL_MEMORY_STATE                        oldState;
+    XGL_MEMORY_STATE                        newState;
+    XGL_GPU_SIZE                            offset;
+    XGL_GPU_SIZE                            regionSize;
+} XGL_MEMORY_STATE_TRANSITION;
+
+// Selects a single subresource (one mip of one array slice for one aspect).
+typedef struct _XGL_IMAGE_SUBRESOURCE
+{
+    XGL_IMAGE_ASPECT                        aspect;
+    XGL_UINT                                mipLevel;
+    XGL_UINT                                arraySlice;
+} XGL_IMAGE_SUBRESOURCE;
+
+// Selects a rectangular range of subresources (mip levels x array slices).
+typedef struct _XGL_IMAGE_SUBRESOURCE_RANGE
+{
+    XGL_IMAGE_ASPECT                        aspect;
+    XGL_UINT                                baseMipLevel;
+    XGL_UINT                                mipLevels;
+    XGL_UINT                                baseArraySlice;
+    XGL_UINT                                arraySize;
+} XGL_IMAGE_SUBRESOURCE_RANGE;
+
+// Transitions a range of image subresources from oldState to newState.
+// NOTE(review): unlike the sibling transition/attach structures, this one has
+// no sType/pNext pair — confirm this is intentional.
+typedef struct _XGL_IMAGE_STATE_TRANSITION
+{
+    XGL_IMAGE                               image;
+    XGL_IMAGE_STATE                         oldState;
+    XGL_IMAGE_STATE                         newState;
+    XGL_IMAGE_SUBRESOURCE_RANGE             subresourceRange;
+} XGL_IMAGE_STATE_TRANSITION;
+
+// Parameters for xglCreateImage.
+typedef struct _XGL_IMAGE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_IMAGE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure.
+    XGL_IMAGE_TYPE                          imageType;
+    XGL_FORMAT                              format;
+    XGL_EXTENT3D                            extent;
+    XGL_UINT                                mipLevels;
+    XGL_UINT                                arraySize;
+    XGL_UINT                                samples;
+    XGL_IMAGE_TILING                        tiling;
+    XGL_FLAGS                               usage;                      // XGL_IMAGE_USAGE_FLAGS
+    XGL_FLAGS                               flags;                      // XGL_IMAGE_CREATE_FLAGS
+} XGL_IMAGE_CREATE_INFO;
+
+// Parameters for xglOpenPeerImage.
+typedef struct _XGL_PEER_IMAGE_OPEN_INFO
+{
+    XGL_IMAGE                               originalImage;
+} XGL_PEER_IMAGE_OPEN_INFO;
+
+// Memory layout of a single image subresource (xglGetImageSubresourceInfo).
+typedef struct _XGL_SUBRESOURCE_LAYOUT
+{
+    XGL_GPU_SIZE                            offset;                     // Specified in bytes
+    XGL_GPU_SIZE                            size;                       // Specified in bytes
+    XGL_GPU_SIZE                            rowPitch;                   // Specified in bytes
+    XGL_GPU_SIZE                            depthPitch;                 // Specified in bytes
+} XGL_SUBRESOURCE_LAYOUT;
+
+// Per-channel swizzle applied by an image view.
+typedef struct _XGL_CHANNEL_MAPPING
+{
+    XGL_CHANNEL_SWIZZLE                     r;
+    XGL_CHANNEL_SWIZZLE                     g;
+    XGL_CHANNEL_SWIZZLE                     b;
+    XGL_CHANNEL_SWIZZLE                     a;
+} XGL_CHANNEL_MAPPING;
+
+// Parameters for xglCreateImageView.
+typedef struct _XGL_IMAGE_VIEW_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_IMAGE                               image;
+    XGL_IMAGE_VIEW_TYPE                     viewType;
+    XGL_FORMAT                              format;
+    XGL_CHANNEL_MAPPING                     channels;
+    XGL_IMAGE_SUBRESOURCE_RANGE             subresourceRange;
+    XGL_FLOAT                               minLod;
+} XGL_IMAGE_VIEW_CREATE_INFO;
+
+// Parameters for xglCreateColorAttachmentView.
+typedef struct _XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO
+    XGL_VOID*                               pNext;                      // Pointer to next structure
+    XGL_IMAGE                               image;
+    XGL_FORMAT                              format;
+    XGL_UINT                                mipLevel;
+    XGL_UINT                                baseArraySlice;
+    XGL_UINT                                arraySize;
+} XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO;
+
+// Parameters for xglCreateDepthStencilView.
+typedef struct _XGL_DEPTH_STENCIL_VIEW_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_IMAGE                               image;
+    XGL_UINT                                mipLevel;
+    XGL_UINT                                baseArraySlice;
+    XGL_UINT                                arraySize;
+    XGL_FLAGS                               flags;                      // XGL_DEPTH_STENCIL_VIEW_CREATE_FLAGS
+} XGL_DEPTH_STENCIL_VIEW_CREATE_INFO;
+
+// Binds a color attachment view together with its expected image state.
+typedef struct _XGL_COLOR_ATTACHMENT_BIND_INFO
+{
+    XGL_COLOR_ATTACHMENT_VIEW               view;
+    XGL_IMAGE_STATE                         colorAttachmentState;       // XGL_IMAGE_STATE
+} XGL_COLOR_ATTACHMENT_BIND_INFO;
+
+// Binds a depth-stencil view with separate depth and stencil states.
+typedef struct _XGL_DEPTH_STENCIL_BIND_INFO
+{
+    XGL_DEPTH_STENCIL_VIEW                  view;
+    XGL_IMAGE_STATE                         depthState;                 // XGL_IMAGE_STATE
+    XGL_IMAGE_STATE                         stencilState;               // XGL_IMAGE_STATE
+} XGL_DEPTH_STENCIL_BIND_INFO;
+
+// One region of a memory-to-memory copy.
+typedef struct _XGL_MEMORY_COPY
+{
+    XGL_GPU_SIZE                            srcOffset;                  // Specified in bytes
+    XGL_GPU_SIZE                            destOffset;                 // Specified in bytes
+    XGL_GPU_SIZE                            copySize;                   // Specified in bytes
+} XGL_MEMORY_COPY;
+
+// One region of an image-to-image copy.
+typedef struct _XGL_IMAGE_COPY
+{
+    XGL_IMAGE_SUBRESOURCE                   srcSubresource;
+    XGL_OFFSET3D                            srcOffset;
+    XGL_IMAGE_SUBRESOURCE                   destSubresource;
+    XGL_OFFSET3D                            destOffset;
+    XGL_EXTENT3D                            extent;
+} XGL_IMAGE_COPY;
+
+// One region of a memory<->image copy.
+typedef struct _XGL_MEMORY_IMAGE_COPY
+{
+    XGL_GPU_SIZE                            memOffset;                  // Specified in bytes
+    XGL_IMAGE_SUBRESOURCE                   imageSubresource;
+    XGL_OFFSET3D                            imageOffset;
+    XGL_EXTENT3D                            imageExtent;
+} XGL_MEMORY_IMAGE_COPY;
+
+// One region of a multisample resolve (2D offsets/extent only).
+typedef struct _XGL_IMAGE_RESOLVE
+{
+    XGL_IMAGE_SUBRESOURCE                   srcSubresource;
+    XGL_OFFSET2D                            srcOffset;
+    XGL_IMAGE_SUBRESOURCE                   destSubresource;
+    XGL_OFFSET2D                            destOffset;
+    XGL_EXTENT2D                            extent;
+} XGL_IMAGE_RESOLVE;
+
+// Parameters for xglCreateShader; pCode points to codeSize bytes of shader IL.
+typedef struct _XGL_SHADER_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_SHADER_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_SIZE                                codeSize;                   // Specified in bytes
+    const XGL_VOID*                         pCode;
+    XGL_FLAGS                               flags;                      // Reserved
+} XGL_SHADER_CREATE_INFO;
+
+// Forward declaration: descriptor set mappings can nest recursively via
+// XGL_DESCRIPTOR_SLOT_INFO::pNextLevelSet.
+struct _XGL_DESCRIPTOR_SET_MAPPING;
+
+// Describes one descriptor slot: either a shader IL entity index or a pointer
+// to a nested mapping level, discriminated by slotObjectType.
+typedef struct _XGL_DESCRIPTOR_SLOT_INFO
+{
+    XGL_DESCRIPTOR_SET_SLOT_TYPE            slotObjectType;
+    union
+    {
+        XGL_UINT                            shaderEntityIndex;// Shader IL slot index for given entity type
+        const struct _XGL_DESCRIPTOR_SET_MAPPING* pNextLevelSet; // Pointer to next descriptor set level
+    };
+} XGL_DESCRIPTOR_SLOT_INFO;
+
+// A level of the descriptor mapping: descriptorCount slot descriptions.
+typedef struct _XGL_DESCRIPTOR_SET_MAPPING
+{
+    XGL_UINT                                descriptorCount;
+    const XGL_DESCRIPTOR_SLOT_INFO*         pDescriptorInfo;
+} XGL_DESCRIPTOR_SET_MAPPING;
+
+// A compile-time ("link") constant buffer baked into a pipeline.
+typedef struct _XGL_LINK_CONST_BUFFER
+{
+    XGL_UINT                                bufferId;
+    XGL_SIZE                                bufferSize;
+    const XGL_VOID*                         pBufferData;
+} XGL_LINK_CONST_BUFFER;
+
+// Mapping for the dynamic memory view slot of a shader stage.
+typedef struct _XGL_DYNAMIC_MEMORY_VIEW_SLOT_INFO
+{
+    XGL_DESCRIPTOR_SET_SLOT_TYPE            slotObjectType;
+    XGL_UINT                                shaderEntityIndex;
+} XGL_DYNAMIC_MEMORY_VIEW_SLOT_INFO;
+
+// Binds one shader stage into a pipeline: the shader object, its descriptor
+// set mappings, link-time constants, and the dynamic memory view mapping.
+typedef struct _XGL_PIPELINE_SHADER
+{
+    XGL_PIPELINE_SHADER_STAGE               stage;
+    XGL_SHADER                              shader;
+    XGL_DESCRIPTOR_SET_MAPPING              descriptorSetMapping[XGL_MAX_DESCRIPTOR_SETS];
+    XGL_UINT                                linkConstBufferCount;
+    const XGL_LINK_CONST_BUFFER*            pLinkConstBufferInfo;
+    XGL_DYNAMIC_MEMORY_VIEW_SLOT_INFO       dynamicMemoryViewMapping;
+} XGL_PIPELINE_SHADER;
+
+// Parameters for creating a compute pipeline (single compute stage `cs`).
+typedef struct _XGL_COMPUTE_PIPELINE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_PIPELINE_SHADER                     cs;
+    XGL_FLAGS                               flags;                      // XGL_PIPELINE_CREATE_FLAGS
+} XGL_COMPUTE_PIPELINE_CREATE_INFO;
+
+// Input-assembler state for a graphics pipeline.
+typedef struct _XGL_PIPELINE_IA_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_IA_STATE_CREATE_INFO (note: the enum entry has no PIPELINE_ prefix)
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_PRIMITIVE_TOPOLOGY                  topology;
+    XGL_BOOL                                disableVertexReuse;
+    XGL_PROVOKING_VERTEX_CONVENTION         provokingVertex;
+    XGL_BOOL                                primitiveRestartEnable;
+    XGL_UINT32                              primitiveRestartIndex;
+} XGL_PIPELINE_IA_STATE_CREATE_INFO;
+
+// Tessellation state for a graphics pipeline.
+typedef struct _XGL_PIPELINE_TESS_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_UINT                                patchControlPoints;
+    XGL_FLOAT                               optimalTessFactor;
+    XGL_FLOAT                               fixedTessFactor;
+} XGL_PIPELINE_TESS_STATE_CREATE_INFO;
+
+// Rasterizer state for a graphics pipeline.
+typedef struct _XGL_PIPELINE_RS_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_BOOL                                depthClipEnable;
+    XGL_BOOL                                rasterizerDiscardEnable;
+    XGL_FLOAT                               pointSize;                  // Size of points
+} XGL_PIPELINE_RS_STATE_CREATE_INFO;
+
+// Per-attachment fixed-function color-buffer state baked into a pipeline.
+typedef struct _XGL_PIPELINE_CB_ATTACHMENT_STATE
+{
+    XGL_BOOL                                blendEnable;
+    XGL_FORMAT                              format;
+    XGL_UINT8                               channelWriteMask;
+} XGL_PIPELINE_CB_ATTACHMENT_STATE;
+
+// Color-blender state baked into a graphics pipeline.
+// Fix: the struct tag and sType both say CREATE_INFO, and every sibling
+// (_DB_, _RS_, _TESS_ state) typedefs to a ..._CREATE_INFO name, but this one
+// was typedef'd as XGL_PIPELINE_CB_STATE. The typedef now follows the
+// convention; the old name is kept as an alias so existing callers compile.
+typedef struct _XGL_PIPELINE_CB_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_BOOL                                alphaToCoverageEnable;
+    XGL_BOOL                                dualSourceBlendEnable;
+    XGL_LOGIC_OP                            logicOp;
+    XGL_PIPELINE_CB_ATTACHMENT_STATE        attachment[XGL_MAX_COLOR_ATTACHMENTS];
+} XGL_PIPELINE_CB_STATE_CREATE_INFO;
+
+// Backward-compatible alias for the original typedef name.
+typedef XGL_PIPELINE_CB_STATE_CREATE_INFO XGL_PIPELINE_CB_STATE;
+
+// Depth-buffer state baked into a graphics pipeline.
+typedef struct _XGL_PIPELINE_DB_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_PIPELINE_DB_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_FORMAT                              format;
+} XGL_PIPELINE_DB_STATE_CREATE_INFO;
+
+// Wraps one XGL_PIPELINE_SHADER for chaining into a pipeline create info.
+typedef struct _XGL_PIPELINE_SHADER_STAGE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_PIPELINE_SHADER                     shader;
+} XGL_PIPELINE_SHADER_STAGE_CREATE_INFO;
+
+// Parameters for xglCreateGraphicsPipeline.
+// NOTE(review): carries no stage/state members itself — the stage and state
+// CREATE_INFO structures are presumably chained through pNext; confirm
+// against the programming guide.
+typedef struct _XGL_GRAPHICS_PIPELINE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_FLAGS                               flags;                      // XGL_PIPELINE_CREATE_FLAGS
+} XGL_GRAPHICS_PIPELINE_CREATE_INFO;
+
+// Parameters for xglCreateSampler.
+typedef struct _XGL_SAMPLER_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_TEX_FILTER                          magFilter;                  // Filter mode for magnification
+    XGL_TEX_FILTER                          minFilter;                  // Filter mode for minifiation
+    XGL_TEX_MIPMAP_MODE                     mipMode;                    // Mipmap selection mode
+    XGL_TEX_ADDRESS                         addressU;
+    XGL_TEX_ADDRESS                         addressV;
+    XGL_TEX_ADDRESS                         addressW;
+    XGL_FLOAT                               mipLodBias;
+    XGL_UINT                                maxAnisotropy;
+    XGL_COMPARE_FUNC                        compareFunc;
+    XGL_FLOAT                               minLod;
+    XGL_FLOAT                               maxLod;
+    XGL_BORDER_COLOR_TYPE                   borderColorType;
+} XGL_SAMPLER_CREATE_INFO;
+
+// Parameters for creating a descriptor set with `slots` descriptor slots.
+typedef struct _XGL_DESCRIPTOR_SET_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_UINT                                slots;
+} XGL_DESCRIPTOR_SET_CREATE_INFO;
+
+// Attaches a descriptor set starting at slotOffset.
+typedef struct _XGL_DESCRIPTOR_SET_ATTACH_INFO
+{
+    XGL_DESCRIPTOR_SET                      descriptorSet;
+    XGL_UINT                                slotOffset;
+} XGL_DESCRIPTOR_SET_ATTACH_INFO;
+
+// Viewport/scissor state object parameters (viewportCount valid entries).
+typedef struct _XGL_VIEWPORT_STATE_CREATE_INFO
+{
+    XGL_UINT                                viewportCount;
+    XGL_BOOL                                scissorEnable;
+    XGL_VIEWPORT                            viewports[XGL_MAX_VIEWPORTS];
+    XGL_RECT                                scissors[XGL_MAX_VIEWPORTS];
+} XGL_VIEWPORT_STATE_CREATE_INFO;
+
+// Dynamic raster state object parameters.
+typedef struct _XGL_RASTER_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_RASTER_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_FILL_MODE                           fillMode;
+    XGL_CULL_MODE                           cullMode;
+    XGL_FACE_ORIENTATION                    frontFace;
+    XGL_INT                                 depthBias;
+    XGL_FLOAT                               depthBiasClamp;
+    XGL_FLOAT                               slopeScaledDepthBias;
+} XGL_RASTER_STATE_CREATE_INFO;
+
+// Dynamic multisample state object parameters.
+typedef struct _XGL_MSAA_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_MSAA_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_UINT                                samples;
+    XGL_SAMPLE_MASK                         sampleMask;
+} XGL_MSAA_STATE_CREATE_INFO;
+
+// Per-attachment blend factors and functions (color and alpha separately).
+typedef struct _XGL_COLOR_ATTACHMENT_BLEND_STATE
+{
+    XGL_BOOL                                blendEnable;
+    XGL_BLEND                               srcBlendColor;
+    XGL_BLEND                               destBlendColor;
+    XGL_BLEND_FUNC                          blendFuncColor;
+    XGL_BLEND                               srcBlendAlpha;
+    XGL_BLEND                               destBlendAlpha;
+    XGL_BLEND_FUNC                          blendFuncAlpha;
+} XGL_COLOR_ATTACHMENT_BLEND_STATE;
+
+// Dynamic color-blend state object parameters.
+typedef struct _XGL_COLOR_BLEND_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_COLOR_BLEND_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_COLOR_ATTACHMENT_BLEND_STATE        attachment[XGL_MAX_COLOR_ATTACHMENTS];
+    XGL_FLOAT                               blendConst[4];              // RGBA blend constant
+} XGL_COLOR_BLEND_STATE_CREATE_INFO;
+
+// Stencil operations for one face (front or back).
+typedef struct _XGL_STENCIL_OP_STATE
+{
+    XGL_STENCIL_OP                          stencilFailOp;
+    XGL_STENCIL_OP                          stencilPassOp;
+    XGL_STENCIL_OP                          stencilDepthFailOp;
+    XGL_COMPARE_FUNC                        stencilFunc;
+    XGL_UINT32                              stencilRef;
+} XGL_STENCIL_OP_STATE;
+
+// Dynamic depth-stencil state object parameters.
+typedef struct _XGL_DEPTH_STENCIL_STATE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_DEPTH_STENCIL_STATE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_BOOL                                depthTestEnable;
+    XGL_BOOL                                depthWriteEnable;
+    XGL_COMPARE_FUNC                        depthFunc;
+    XGL_BOOL                                depthBoundsEnable;
+    XGL_FLOAT                               minDepth;
+    XGL_FLOAT                               maxDepth;
+    XGL_BOOL                                stencilTestEnable;
+    XGL_UINT32                              stencilReadMask;
+    XGL_UINT32                              stencilWriteMask;
+    XGL_STENCIL_OP_STATE                    front;
+    XGL_STENCIL_OP_STATE                    back;
+} XGL_DEPTH_STENCIL_STATE_CREATE_INFO;
+
+// Parameters for xglCreateCommandBuffer (targets one queue type).
+typedef struct _XGL_CMD_BUFFER_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO
+    const XGL_VOID*                         pNext;
+    XGL_QUEUE_TYPE                          queueType;
+    XGL_FLAGS                               flags;
+} XGL_CMD_BUFFER_CREATE_INFO;
+
+// A GPU memory object referenced by a submission (see xglQueueSubmit).
+typedef struct _XGL_MEMORY_REF
+{
+    XGL_GPU_MEMORY                          mem;
+    XGL_FLAGS                               flags;                      // XGL_MEMORY_REF_FLAGS
+} XGL_MEMORY_REF;
+
+// Parameters for xglCreateEvent.
+typedef struct _XGL_EVENT_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_EVENT_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_FLAGS                               flags;                      // Reserved
+} XGL_EVENT_CREATE_INFO;
+
+// Parameters for xglCreateFence.
+typedef struct _XGL_FENCE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_FENCE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_FLAGS                               flags;                      // Reserved
+} XGL_FENCE_CREATE_INFO;
+
+// Parameters for xglCreateQueueSemaphore (counting semaphore).
+typedef struct _XGL_QUEUE_SEMAPHORE_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_UINT                                initialCount;
+    XGL_FLAGS                               flags;                      // XGL_SEMAPHORE_CREATE_FLAGS
+} XGL_QUEUE_SEMAPHORE_CREATE_INFO;
+
+// Parameters for xglOpenSharedQueueSemaphore.
+typedef struct _XGL_QUEUE_SEMAPHORE_OPEN_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_SEMAPHORE_OPEN_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_QUEUE_SEMAPHORE                     sharedSemaphore;
+} XGL_QUEUE_SEMAPHORE_OPEN_INFO;
+
+// Result layout of a pipeline-statistics query.
+typedef struct _XGL_PIPELINE_STATISTICS_DATA
+{
+    XGL_UINT64                              fsInvocations;              // Fragment shader invocations
+    XGL_UINT64                              cPrimitives;                // Clipper primitives
+    XGL_UINT64                              cInvocations;               // Clipper invocations
+    XGL_UINT64                              vsInvocations;              // Vertex shader invocations
+    XGL_UINT64                              gsInvocations;              // Geometry shader invocations
+    XGL_UINT64                              gsPrimitives;               // Geometry shader primitives
+    XGL_UINT64                              iaPrimitives;               // Input primitives
+    XGL_UINT64                              iaVertices;                 // Input vertices
+    XGL_UINT64                              tcsInvocations;             // Tessellation control shader invocations
+    XGL_UINT64                              tesInvocations;             // Tessellation evaluation shader invocations
+    XGL_UINT64                              csInvocations;              // Compute shader invocations
+} XGL_PIPELINE_STATISTICS_DATA;
+
+// Parameters for xglCreateQueryPool (`slots` query slots of `queryType`).
+typedef struct _XGL_QUERY_POOL_CREATE_INFO
+{
+    XGL_STRUCTURE_TYPE                      sType;                      // Must be XGL_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO
+    const XGL_VOID*                         pNext;                      // Pointer to next structure
+    XGL_QUERY_TYPE                          queryType;
+    XGL_UINT                                slots;
+} XGL_QUERY_POOL_CREATE_INFO;
+
+// In-memory layout consumed by indirect draw commands.
+typedef struct _XGL_DRAW_INDIRECT_CMD
+{
+    XGL_UINT32                              vertexCount;
+    XGL_UINT32                              instanceCount;
+    XGL_UINT32                              firstVertex;
+    XGL_UINT32                              firstInstance;
+} XGL_DRAW_INDIRECT_CMD;
+
+// In-memory layout consumed by indexed indirect draw commands.
+typedef struct _XGL_DRAW_INDEXED_INDIRECT_CMD
+{
+    XGL_UINT32                              indexCount;
+    XGL_UINT32                              instanceCount;
+    XGL_UINT32                              firstIndex;
+    XGL_INT32                               vertexOffset;               // Signed: added to each index
+    XGL_UINT32                              firstInstance;
+} XGL_DRAW_INDEXED_INDIRECT_CMD;
+
+// In-memory layout consumed by indirect dispatch commands.
+typedef struct _XGL_DISPATCH_INDIRECT_CMD
+{
+    XGL_UINT32                              x;
+    XGL_UINT32                              y;
+    XGL_UINT32                              z;
+} XGL_DISPATCH_INDIRECT_CMD;
+
+// ------------------------------------------------------------------------------------------------
+// API functions
+// Prototypes only — behavior is implemented by the XGL runtime/driver.
+
+// GPU initialization
+
+// Initializes XGL and enumerates up to maxGpus physical GPUs into pGpus;
+// the count written is returned through pGpuCount.
+XGL_RESULT XGLAPI xglInitAndEnumerateGpus(
+    const XGL_APPLICATION_INFO*                 pAppInfo,
+    const XGL_ALLOC_CALLBACKS*                  pAllocCb,
+    XGL_UINT                                    maxGpus,
+    XGL_UINT*                                   pGpuCount,
+    XGL_PHYSICAL_GPU*                           pGpus);
+
+// Queries a property block of a GPU selected by infoType. pDataSize is
+// presumably in/out (pass NULL pData to query the required size) — confirm
+// against the programming guide.
+XGL_RESULT XGLAPI xglGetGpuInfo(
+    XGL_PHYSICAL_GPU                            gpu,
+    XGL_PHYSICAL_GPU_INFO_TYPE                  infoType,
+    XGL_SIZE*                                   pDataSize,
+    XGL_VOID*                                   pData);
+
+// Device functions
+
+XGL_RESULT XGLAPI xglCreateDevice(
+    XGL_PHYSICAL_GPU                            gpu,
+    const XGL_DEVICE_CREATE_INFO*               pCreateInfo,
+    XGL_DEVICE*                                 pDevice);
+
+XGL_RESULT XGLAPI xglDestroyDevice(
+    XGL_DEVICE                                  device);
+
+// Extension discovery functions
+
+// Returns whether the named extension is supported by the GPU.
+XGL_RESULT XGLAPI xglGetExtensionSupport(
+    XGL_PHYSICAL_GPU                            gpu,
+    const XGL_CHAR*                             pExtName);
+
+// Queue functions
+
+XGL_RESULT XGLAPI xglGetDeviceQueue(
+    XGL_DEVICE                                  device,
+    XGL_QUEUE_TYPE                              queueType,
+    XGL_UINT                                    queueIndex,
+    XGL_QUEUE*                                  pQueue);
+
+// Submits cmdBufferCount command buffers with their memory references;
+// the optional fence is associated with the submission.
+XGL_RESULT XGLAPI xglQueueSubmit(
+    XGL_QUEUE                                   queue,
+    XGL_UINT                                    cmdBufferCount,
+    const XGL_CMD_BUFFER*                       pCmdBuffers,
+    XGL_UINT                                    memRefCount,
+    const XGL_MEMORY_REF*                       pMemRefs,
+    XGL_FENCE                                   fence);
+
+XGL_RESULT XGLAPI xglQueueSetGlobalMemReferences(
+    XGL_QUEUE                                   queue,
+    XGL_UINT                                    memRefCount,
+    const XGL_MEMORY_REF*                       pMemRefs);
+
+XGL_RESULT XGLAPI xglQueueWaitIdle(
+    XGL_QUEUE                                   queue);
+
+XGL_RESULT XGLAPI xglDeviceWaitIdle(
+    XGL_DEVICE                                  device);
+
+// Memory functions
+
+XGL_RESULT XGLAPI xglGetMemoryHeapCount(
+    XGL_DEVICE                                  device,
+    XGL_UINT*                                   pCount);
+
+// Queries heap information; follows the same pDataSize in/out pattern as
+// xglGetGpuInfo (presumably — confirm).
+XGL_RESULT XGLAPI xglGetMemoryHeapInfo(
+    XGL_DEVICE                                  device,
+    XGL_UINT                                    heapId,
+    XGL_MEMORY_HEAP_INFO_TYPE                   infoType,
+    XGL_SIZE*                                   pDataSize,
+    XGL_VOID*                                   pData);
+
+XGL_RESULT XGLAPI xglAllocMemory(
+    XGL_DEVICE                                  device,
+    const XGL_MEMORY_ALLOC_INFO*                pAllocInfo,
+    XGL_GPU_MEMORY*                             pMem);
+
+XGL_RESULT XGLAPI xglFreeMemory(
+    XGL_GPU_MEMORY                              mem);
+
+XGL_RESULT XGLAPI xglSetMemoryPriority(
+    XGL_GPU_MEMORY                              mem,
+    XGL_MEMORY_PRIORITY                         priority);
+
+// Maps a memory object into host address space; the CPU pointer is returned
+// through ppData.
+XGL_RESULT XGLAPI xglMapMemory(
+    XGL_GPU_MEMORY                              mem,
+    XGL_FLAGS                                   flags,                  // Reserved
+    XGL_VOID**                                  ppData);
+
+XGL_RESULT XGLAPI xglUnmapMemory(
+    XGL_GPU_MEMORY                              mem);
+
+// Wraps memSize bytes of host memory at pSysMem as a GPU memory object.
+XGL_RESULT XGLAPI xglPinSystemMemory(
+    XGL_DEVICE                                  device,
+    const XGL_VOID*                             pSysMem,
+    XGL_SIZE                                    memSize,
+    XGL_GPU_MEMORY*                             pMem);
+
+// Applies rangeCount page remappings, synchronized by the given wait/signal
+// semaphore sets.
+XGL_RESULT XGLAPI xglRemapVirtualMemoryPages(
+    XGL_DEVICE                                  device,
+    XGL_UINT                                    rangeCount,
+    const XGL_VIRTUAL_MEMORY_REMAP_RANGE*       pRanges,
+    XGL_UINT                                    preWaitSemaphoreCount,
+    const XGL_QUEUE_SEMAPHORE*                  pPreWaitSemaphores,
+    XGL_UINT                                    postSignalSemaphoreCount,
+    const XGL_QUEUE_SEMAPHORE*                  pPostSignalSemaphores);
+
+// Multi-device functions
+
+XGL_RESULT XGLAPI xglGetMultiGpuCompatibility(
+    XGL_PHYSICAL_GPU                            gpu0,
+    XGL_PHYSICAL_GPU                            gpu1,
+    XGL_GPU_COMPATIBILITY_INFO*                 pInfo);
+
+XGL_RESULT XGLAPI xglOpenSharedMemory(
+    XGL_DEVICE                                  device,
+    const XGL_MEMORY_OPEN_INFO*                 pOpenInfo,
+    XGL_GPU_MEMORY*                             pMem);
+
+XGL_RESULT XGLAPI xglOpenSharedQueueSemaphore(
+    XGL_DEVICE                                  device,
+    const XGL_QUEUE_SEMAPHORE_OPEN_INFO*        pOpenInfo,
+    XGL_QUEUE_SEMAPHORE*                        pSemaphore);
+
+XGL_RESULT XGLAPI xglOpenPeerMemory(
+    XGL_DEVICE                                  device,
+    const XGL_PEER_MEMORY_OPEN_INFO*            pOpenInfo,
+    XGL_GPU_MEMORY*                             pMem);
+
+// Opens a peer GPU's image; returns both the image and its memory object.
+XGL_RESULT XGLAPI xglOpenPeerImage(
+    XGL_DEVICE                                  device,
+    const XGL_PEER_IMAGE_OPEN_INFO*             pOpenInfo,
+    XGL_IMAGE*                                  pImage,
+    XGL_GPU_MEMORY*                             pMem);
+
+// Generic API object functions
+
+XGL_RESULT XGLAPI xglDestroyObject(
+    XGL_OBJECT                                  object);
+
+XGL_RESULT XGLAPI xglGetObjectInfo(
+    XGL_BASE_OBJECT                             object,
+    XGL_OBJECT_INFO_TYPE                        infoType,
+    XGL_SIZE*                                   pDataSize,
+    XGL_VOID*                                   pData);
+
+// Binds memory to an object at the given byte offset.
+XGL_RESULT XGLAPI xglBindObjectMemory(
+    XGL_OBJECT                                  object,
+    XGL_GPU_MEMORY                              mem,
+    XGL_GPU_SIZE                                offset);
+
+// Fence functions
+
+XGL_RESULT XGLAPI xglCreateFence(
+    XGL_DEVICE                                  device,
+    const XGL_FENCE_CREATE_INFO*                pCreateInfo,
+    XGL_FENCE*                                  pFence);
+
+// Non-blocking fence status query.
+XGL_RESULT XGLAPI xglGetFenceStatus(
+    XGL_FENCE                                   fence);
+
+// Blocks until all (waitAll true) or any of the fences signal, or the
+// timeout elapses — timeout units not specified here; confirm (presumably
+// nanoseconds).
+XGL_RESULT XGLAPI xglWaitForFences(
+    XGL_DEVICE                                  device,
+    XGL_UINT                                    fenceCount,
+    const XGL_FENCE*                            pFences,
+    XGL_BOOL                                    waitAll,
+    XGL_UINT64                                  timeout);
+
+// Queue semaphore functions
+
+XGL_RESULT XGLAPI xglCreateQueueSemaphore(
+    XGL_DEVICE                                  device,
+    const XGL_QUEUE_SEMAPHORE_CREATE_INFO*      pCreateInfo,
+    XGL_QUEUE_SEMAPHORE*                        pSemaphore);
+
+XGL_RESULT XGLAPI xglSignalQueueSemaphore(
+    XGL_QUEUE                                   queue,
+    XGL_QUEUE_SEMAPHORE                         semaphore);
+
+XGL_RESULT XGLAPI xglWaitQueueSemaphore(
+    XGL_QUEUE                                   queue,
+    XGL_QUEUE_SEMAPHORE                         semaphore);
+
+// Event functions
+
+XGL_RESULT XGLAPI xglCreateEvent(
+    XGL_DEVICE                                  device,
+    const XGL_EVENT_CREATE_INFO*                pCreateInfo,
+    XGL_EVENT*                                  pEvent);
+
+XGL_RESULT XGLAPI xglGetEventStatus(
+    XGL_EVENT                                   event);
+
+XGL_RESULT XGLAPI xglSetEvent(
+    XGL_EVENT                                   event);
+
+XGL_RESULT XGLAPI xglResetEvent(
+    XGL_EVENT                                   event);
+
+// Query functions
+
+XGL_RESULT XGLAPI xglCreateQueryPool(
+    XGL_DEVICE                                  device,
+    const XGL_QUERY_POOL_CREATE_INFO*           pCreateInfo,
+    XGL_QUERY_POOL*                             pQueryPool);
+
+// Retrieves results for queryCount queries starting at startQuery; same
+// pDataSize in/out size-query convention as the other Get* calls (presumably).
+XGL_RESULT XGLAPI xglGetQueryPoolResults(
+    XGL_QUERY_POOL                              queryPool,
+    XGL_UINT                                    startQuery,
+    XGL_UINT                                    queryCount,
+    XGL_SIZE*                                   pDataSize,
+    XGL_VOID*                                   pData);
+
+// Format capabilities
+
+XGL_RESULT XGLAPI xglGetFormatInfo(
+ XGL_DEVICE device,
+ XGL_FORMAT format,
+ XGL_FORMAT_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+// Image functions
+
+XGL_RESULT XGLAPI xglCreateImage(
+ XGL_DEVICE device,
+ const XGL_IMAGE_CREATE_INFO* pCreateInfo,
+ XGL_IMAGE* pImage);
+
+XGL_RESULT XGLAPI xglGetImageSubresourceInfo(
+ XGL_IMAGE image,
+ const XGL_IMAGE_SUBRESOURCE* pSubresource,
+ XGL_SUBRESOURCE_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+// Image view functions
+
+XGL_RESULT XGLAPI xglCreateImageView(
+ XGL_DEVICE device,
+ const XGL_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_IMAGE_VIEW* pView);
+
+XGL_RESULT XGLAPI xglCreateColorAttachmentView(
+ XGL_DEVICE device,
+ const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_COLOR_ATTACHMENT_VIEW* pView);
+
+XGL_RESULT XGLAPI xglCreateDepthStencilView(
+ XGL_DEVICE device,
+ const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_DEPTH_STENCIL_VIEW* pView);
+
+// Shader functions
+
+XGL_RESULT XGLAPI xglCreateShader(
+ XGL_DEVICE device,
+ const XGL_SHADER_CREATE_INFO* pCreateInfo,
+ XGL_SHADER* pShader);
+
+// Pipeline functions
+
+XGL_RESULT XGLAPI xglCreateGraphicsPipeline(
+ XGL_DEVICE device,
+ const XGL_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
+ XGL_PIPELINE* pPipeline);
+
+XGL_RESULT XGLAPI xglCreateComputePipeline(
+ XGL_DEVICE device,
+ const XGL_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo,
+ XGL_PIPELINE* pPipeline);
+
+XGL_RESULT XGLAPI xglStorePipeline(
+ XGL_PIPELINE pipeline,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+XGL_RESULT XGLAPI xglLoadPipeline(
+ XGL_DEVICE device,
+ XGL_SIZE dataSize,
+ const XGL_VOID* pData,
+ XGL_PIPELINE* pPipeline);
+
+XGL_RESULT XGLAPI xglCreatePipelineDelta(
+ XGL_DEVICE device,
+ XGL_PIPELINE p1,
+ XGL_PIPELINE p2,
+ XGL_PIPELINE_DELTA* delta);
+
+// Sampler functions
+
+XGL_RESULT XGLAPI xglCreateSampler(
+ XGL_DEVICE device,
+ const XGL_SAMPLER_CREATE_INFO* pCreateInfo,
+ XGL_SAMPLER* pSampler);
+
+// Descriptor set functions
+
+XGL_RESULT XGLAPI xglCreateDescriptorSet(
+ XGL_DEVICE device,
+ const XGL_DESCRIPTOR_SET_CREATE_INFO* pCreateInfo,
+ XGL_DESCRIPTOR_SET* pDescriptorSet);
+
+XGL_VOID XGLAPI xglBeginDescriptorSetUpdate(
+ XGL_DESCRIPTOR_SET descriptorSet);
+
+XGL_VOID XGLAPI xglEndDescriptorSetUpdate(
+ XGL_DESCRIPTOR_SET descriptorSet);
+
+XGL_VOID XGLAPI xglAttachSamplerDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_SAMPLER* pSamplers);
+
+XGL_VOID XGLAPI xglAttachImageViewDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_IMAGE_VIEW_ATTACH_INFO* pImageViews);
+
+XGL_VOID XGLAPI xglAttachMemoryViewDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemViews);
+
+XGL_VOID XGLAPI xglAttachNestedDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_DESCRIPTOR_SET_ATTACH_INFO* pNestedDescriptorSets);
+
+XGL_VOID XGLAPI xglClearDescriptorSetSlots(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount);
+
+// State object functions
+
+XGL_RESULT XGLAPI xglCreateViewportState(
+ XGL_DEVICE device,
+ const XGL_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
+ XGL_VIEWPORT_STATE_OBJECT* pState);
+
+XGL_RESULT XGLAPI xglCreateRasterState(
+ XGL_DEVICE device,
+ const XGL_RASTER_STATE_CREATE_INFO* pCreateInfo,
+ XGL_RASTER_STATE_OBJECT* pState);
+
+XGL_RESULT XGLAPI xglCreateMsaaState(
+ XGL_DEVICE device,
+ const XGL_MSAA_STATE_CREATE_INFO* pCreateInfo,
+ XGL_MSAA_STATE_OBJECT* pState);
+
+XGL_RESULT XGLAPI xglCreateColorBlendState(
+ XGL_DEVICE device,
+ const XGL_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
+ XGL_COLOR_BLEND_STATE_OBJECT* pState);
+
+XGL_RESULT XGLAPI xglCreateDepthStencilState(
+ XGL_DEVICE device,
+ const XGL_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
+ XGL_DEPTH_STENCIL_STATE_OBJECT* pState);
+
+// Command buffer functions
+
+XGL_RESULT XGLAPI xglCreateCommandBuffer(
+ XGL_DEVICE device,
+ const XGL_CMD_BUFFER_CREATE_INFO* pCreateInfo,
+ XGL_CMD_BUFFER* pCmdBuffer);
+
+XGL_RESULT XGLAPI xglBeginCommandBuffer(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_FLAGS flags); // XGL_CMD_BUFFER_BUILD_FLAGS
+
+XGL_RESULT XGLAPI xglEndCommandBuffer(
+ XGL_CMD_BUFFER cmdBuffer);
+
+XGL_RESULT XGLAPI xglResetCommandBuffer(
+ XGL_CMD_BUFFER cmdBuffer);
+
+// Command buffer building functions
+
+XGL_VOID XGLAPI xglCmdBindPipeline(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_PIPELINE pipeline);
+
+XGL_VOID XGLAPI xglCmdBindPipelineDelta(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_PIPELINE_DELTA delta);
+
+XGL_VOID XGLAPI xglCmdBindStateObject(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_STATE_BIND_POINT stateBindPoint,
+ XGL_STATE_OBJECT state);
+
+XGL_VOID XGLAPI xglCmdBindDescriptorSet(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT index,
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT slotOffset);
+
+XGL_VOID XGLAPI xglCmdBindDynamicMemoryView(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemView);
+
+XGL_VOID XGLAPI xglCmdBindIndexData(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_INDEX_TYPE indexType);
+
+XGL_VOID XGLAPI xglCmdBindAttachments(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT colorAttachmentCount,
+ const XGL_COLOR_ATTACHMENT_BIND_INFO* pColorAttachments,
+ const XGL_DEPTH_STENCIL_BIND_INFO* pDepthStencilAttachment);
+
+XGL_VOID XGLAPI xglCmdPrepareMemoryRegions(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT transitionCount,
+ const XGL_MEMORY_STATE_TRANSITION* pStateTransitions);
+
+XGL_VOID XGLAPI xglCmdPrepareImages(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT transitionCount,
+ const XGL_IMAGE_STATE_TRANSITION* pStateTransitions);
+
+XGL_VOID XGLAPI xglCmdDraw(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT firstVertex,
+ XGL_UINT vertexCount,
+ XGL_UINT firstInstance,
+ XGL_UINT instanceCount);
+
+XGL_VOID XGLAPI xglCmdDrawIndexed(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT firstIndex,
+ XGL_UINT indexCount,
+ XGL_INT vertexOffset,
+ XGL_UINT firstInstance,
+ XGL_UINT instanceCount);
+
+XGL_VOID XGLAPI xglCmdDrawIndirect(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_UINT32 count,
+ XGL_UINT32 stride);
+
+XGL_VOID XGLAPI xglCmdDrawIndexedIndirect(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_UINT32 count,
+ XGL_UINT32 stride);
+
+XGL_VOID XGLAPI xglCmdDispatch(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT x,
+ XGL_UINT y,
+ XGL_UINT z);
+
+XGL_VOID XGLAPI xglCmdDispatchIndirect(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset);
+
+XGL_VOID XGLAPI xglCmdCopyMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY srcMem,
+ XGL_GPU_MEMORY destMem,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_COPY* pRegions);
+
+XGL_VOID XGLAPI xglCmdCopyImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE destImage,
+ XGL_UINT regionCount,
+ const XGL_IMAGE_COPY* pRegions);
+
+XGL_VOID XGLAPI xglCmdCopyMemoryToImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY srcMem,
+ XGL_IMAGE destImage,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_IMAGE_COPY* pRegions);
+
+XGL_VOID XGLAPI xglCmdCopyImageToMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_GPU_MEMORY destMem,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_IMAGE_COPY* pRegions);
+
+XGL_VOID XGLAPI xglCmdCloneImageData(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE_STATE srcImageState,
+ XGL_IMAGE destImage,
+ XGL_IMAGE_STATE destImageState);
+
+XGL_VOID XGLAPI xglCmdUpdateMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_GPU_SIZE dataSize,
+ const XGL_UINT32* pData);
+
+XGL_VOID XGLAPI xglCmdFillMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_GPU_SIZE fillSize,
+ XGL_UINT32 data);
+
+XGL_VOID XGLAPI xglCmdClearColorImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ const XGL_FLOAT color[4],
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+XGL_VOID XGLAPI xglCmdClearColorImageRaw(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ const XGL_UINT32 color[4],
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+XGL_VOID XGLAPI xglCmdClearDepthStencil(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ XGL_FLOAT depth,
+ XGL_UINT32 stencil,
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+XGL_VOID XGLAPI xglCmdResolveImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE destImage,
+ XGL_UINT rectCount,
+ const XGL_IMAGE_RESOLVE* pRects);
+
+XGL_VOID XGLAPI xglCmdSetEvent(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_EVENT event);
+
+XGL_VOID XGLAPI xglCmdResetEvent(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_EVENT event);
+
+XGL_VOID XGLAPI xglCmdMemoryAtomic(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_UINT64 srcData,
+ XGL_ATOMIC_OP atomicOp);
+
+XGL_VOID XGLAPI xglCmdBeginQuery(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT slot,
+ XGL_FLAGS flags);
+
+XGL_VOID XGLAPI xglCmdEndQuery(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT slot);
+
+XGL_VOID XGLAPI xglCmdResetQueryPool(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT startQuery,
+ XGL_UINT queryCount);
+
+XGL_VOID XGLAPI xglCmdWriteTimestamp(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_TIMESTAMP_TYPE timestampType,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset);
+
+XGL_VOID XGLAPI xglCmdInitAtomicCounters(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ const XGL_UINT32* pData);
+
+XGL_VOID XGLAPI xglCmdLoadAtomicCounters(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ XGL_GPU_MEMORY srcMem,
+ XGL_GPU_SIZE srcOffset);
+
+XGL_VOID XGLAPI xglCmdSaveAtomicCounters(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // __XGL_H__
+
+/******************************************************************************************
+
+ Open Issues + Missing Features
+ ------------------------------
+
+ Here are a few higher level issues that we'd like to fix given time. A feature missing
+ from this header (or the following list) isn't necessarily an indication that we want
+ to drop that feature. Only that we either haven't thought of it or haven't had time
+ to add it yet.
+
+ 1) Transform Feedback (XFB)
+
+ OpenGL supports transform feedback (XFB). That is not included in this header, but
+ we feel there is likely value in including it.
+
+ To incorporate transform feedback, we could create a new pipeline stage. This would
+ be injected into a PSO by including the following in the chain:
+
+ typedef struct _XGL_XFB_CREATE_INFO
+ {
+ XGL_STRUCTURE_TYPE sType; // Must be XGL_STRUCTURE_TYPE_PIPELINE_XFB_CREATE_INFO
+ const XGL_VOID* pNext; // Pointer to next structure
+ // More XFB state, if any goes here
+ } XGL_XFB_CREATE_INFO;
+
+ We expect that only the shader-side configuration (via layout qualifiers or their IR
+ equivalent) is used to configure the data written to each stream. When transform
+ feedback is part of the pipeline, transform feedback binding would be available
+ through a new API bind point:
+
+ xglCmdBindTransformFeedbackMemoryView(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint, // = GRAPHICS
+ XGL_UINT index,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemView);
+
+ 2) Framebuffer Objects
+
+ The XGL API here doesn't have a direct equivalent for a framebuffer object. In GL,
+ the framebuffer object owns attachments, and the indirection table for glDrawBuffers, etc.
+ The indirection is gone - only identity is supported here.
+
+ We may introduce an analog to the framebuffer object that packages all color
+ attachments. You would create a framebuffer thus:
+
+ typedef struct _XGL_FRAMEBUFFER_CREATE_INFO
+ {
+ XGL_STRUCTURE_TYPE sType; // Must be XGL_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
+ const XGL_VOID* pNext; // Pointer to next structure
+ XGL_UINT32 colorAttachmentCount;
+ XGL_COLOR_ATTACHMENT_BIND_INFO* pColorAttachments;
+ XGL_DEPTH_STENCIL_BIND_INFO* pDepthStencilAttachment;
+ } XGL_FRAMEBUFFER_CREATE_INFO;
+
+ xglCreateFramebuffer(
+ XGL_DEVICE device,
+ const XGL_FRAMEBUFFER_CREATE_INFO* pInfo,
+ XGL_FRAMEBUFFER* pFramebuffer);
+
+ We then replace the xglCmdBindAttachments API with:
+
+ xglBindFramebuffer(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint, // = GRAPHICS
+ XGL_FRAMEBUFFER framebuffer);
+
+ 3) "Bindless" + support for non-bindless hardware.
+
+ XGL doesn't have bindless textures the way that GL does. It has resource descriptor
+ sets, or resource tables. Resource tables can be nested and hold references to more
+ resource tables. They are explicitly sized by the application and have no artificial
+ upper size limit. An application can still attach as many textures as they want to
+ a resource descriptor set, and can modify the set asynchronously to GPU work.
+ Therefore, we can still have "unlimited textures". An application hoping to use
+ bindless can use an index into a large table of textures and achieve the same effect.
+
+ For non-bindless hardware, with fixed (but potentially large) register files for
+ resource bindings, the table approach should still work if a limited size can be
+ reported somehow.
+
+ 4) Clean up some remaining Mantle'isms.
+
+ Queue types: It's a bit hand wavey. In Mantle, we have a "universal" queue type that
+ supports compute and graphics and a "compute" queue that only supports compute. Devices
+ must support at least one universal queue and DMA queues are an extension. I would like
+ to do the following (and have attempted to do that here, but am only half done):
+
+ a) Separate out the queue capabilities (compute, DMA, graphics) and allow support
+ for any number of queues with any combination of capabilities each.
+
+ b) Allow compute-only or even DMA-only (like video capture or SDI) devices to
+ be supported.
+
+ c) Allow new queue types to be supported by extensions without having to allocate
+ bits in the bitfield until they're promoted to core.
+
+ Terminology: There are still some references to "targets" (render targets) and other
+ terminology that has been changed from Mantle. Need to do a clean-up pass.
+
+ 5) The window system interface is an extension in Mantle. We have not tried to fold
+ any of it into core here. There is no mention of SwapBuffers, presentation, default
+ framebuffers or anything like that. In the extension, presentation is queued up into
+ the graphics queue just like any other command.
+
+*******************************************************************************************/
diff --git a/main/CMakeLists.txt b/main/CMakeLists.txt
new file mode 100644
index 0000000..67ee29f
--- /dev/null
+++ b/main/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Create the XGL library
+
+add_library(xgl SHARED xglapi.c)
+
diff --git a/main/Makefile.am b/main/Makefile.am
new file mode 100644
index 0000000..38758a0
--- /dev/null
+++ b/main/Makefile.am
@@ -0,0 +1,131 @@
+# Copyright © 2012 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+AM_CFLAGS = \
+ -I$(top_srcdir)/include \
+ -I$(top_srcdir)/src/gbm/main \
+ $(DEFINES) \
+ $(VISIBILITY_CFLAGS) \
+ $(EGL_CFLAGS) \
+ -D_EGL_NATIVE_PLATFORM=$(EGL_NATIVE_PLATFORM) \
+ -D_EGL_DRIVER_SEARCH_DIR=\"$(EGL_DRIVER_INSTALL_DIR)\" \
+ -D_EGL_OS_UNIX=1
+
+lib_LTLIBRARIES = libEGL.la
+
+libEGL_la_SOURCES = \
+ eglapi.c \
+ eglapi.h \
+ eglarray.c \
+ eglarray.h \
+ eglcompiler.h \
+ eglconfig.c \
+ eglconfig.h \
+ eglcontext.c \
+ eglcontext.h \
+ eglcurrent.c \
+ eglcurrent.h \
+ egldefines.h \
+ egldisplay.c \
+ egldisplay.h \
+ egldriver.c \
+ egldriver.h \
+ eglfallbacks.c \
+ eglglobals.c \
+ eglglobals.h \
+ eglimage.c \
+ eglimage.h \
+ egllog.c \
+ egllog.h \
+ eglmisc.c \
+ eglmisc.h \
+ eglmode.c \
+ eglmode.h \
+ eglmutex.h \
+ eglscreen.c \
+ eglscreen.h \
+ eglstring.c \
+ eglstring.h \
+ eglsurface.c \
+ eglsurface.h \
+ eglsync.c \
+ eglsync.h \
+ egltypedefs.h
+
+libEGL_la_LIBADD = \
+ $(EGL_LIB_DEPS)
+libEGL_la_LDFLAGS = \
+ -no-undefined \
+ -version-number 1:0 \
+ $(BSYMBOLIC) \
+ $(GC_SECTIONS) \
+ $(LD_NO_UNDEFINED)
+
+if HAVE_EGL_PLATFORM_X11
+AM_CFLAGS += -DHAVE_X11_PLATFORM
+AM_CFLAGS += $(XCB_DRI2_CFLAGS)
+libEGL_la_LIBADD += $(XCB_DRI2_LIBS)
+endif
+
+if HAVE_EGL_PLATFORM_WAYLAND
+AM_CFLAGS += -DHAVE_WAYLAND_PLATFORM
+AM_CFLAGS += $(WAYLAND_CFLAGS)
+libEGL_la_LIBADD += $(WAYLAND_LIBS)
+libEGL_la_LIBADD += $(LIBDRM_LIBS)
+libEGL_la_LIBADD += ../wayland/wayland-drm/libwayland-drm.la
+endif
+
+if HAVE_EGL_PLATFORM_DRM
+AM_CFLAGS += -DHAVE_DRM_PLATFORM
+libEGL_la_LIBADD += ../../gbm/libgbm.la
+endif
+
+if HAVE_EGL_PLATFORM_FBDEV
+AM_CFLAGS += -DHAVE_FBDEV_PLATFORM
+endif
+
+if HAVE_EGL_PLATFORM_NULL
+AM_CFLAGS += -DHAVE_NULL_PLATFORM
+endif
+
+if HAVE_EGL_DRIVER_DRI2
+AM_CFLAGS += -D_EGL_BUILT_IN_DRIVER_DRI2
+AM_CFLAGS += -DHAVE_XCB_DRI2
+libEGL_la_LIBADD += ../drivers/dri2/libegl_dri2.la
+libEGL_la_LIBADD += $(DLOPEN_LIBS) $(LIBDRM_LIBS)
+endif
+
+include $(top_srcdir)/install-lib-links.mk
+
+pkgconfigdir = $(libdir)/pkgconfig
+
+pkgconfig_DATA = egl.pc
+
+khrdir = $(includedir)/KHR
+khr_HEADERS = $(top_srcdir)/include/KHR/khrplatform.h
+
+egldir = $(includedir)/EGL
+egl_HEADERS = \
+ $(top_srcdir)/include/EGL/eglext.h \
+ $(top_srcdir)/include/EGL/egl.h \
+ $(top_srcdir)/include/EGL/eglextchromium.h \
+ $(top_srcdir)/include/EGL/eglmesaext.h \
+ $(top_srcdir)/include/EGL/eglplatform.h
diff --git a/main/xglapi.c b/main/xglapi.c
new file mode 100644
index 0000000..01f3060
--- /dev/null
+++ b/main/xglapi.c
@@ -0,0 +1,854 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2012-2013 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Courtney Goeltzenleuchter <courtney@lunarg.com>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "xglapi.h"
+
+// GPU initialization
+
+XGL_RESULT XGLAPI xglInitAndEnumerateGpus(
+ const XGL_APPLICATION_INFO* pAppInfo,
+ const XGL_ALLOC_CALLBACKS* pAllocCb,
+ XGL_UINT maxGpus,
+ XGL_UINT* pGpuCount,
+ XGL_PHYSICAL_GPU* pGpus)
+{
+}
+
+XGL_RESULT XGLAPI xglGetGpuInfo(
+ XGL_PHYSICAL_GPU gpu,
+ XGL_PHYSICAL_GPU_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+// Device functions
+
+XGL_RESULT XGLAPI xglCreateDevice(
+ XGL_PHYSICAL_GPU gpu,
+ const XGL_DEVICE_CREATE_INFO* pCreateInfo,
+ XGL_DEVICE* pDevice)
+{
+}
+
+XGL_RESULT XGLAPI xglDestroyDevice(
+ XGL_DEVICE device)
+{
+}
+
+// Extension discovery functions
+
+XGL_RESULT XGLAPI xglGetExtensionSupport(
+ XGL_PHYSICAL_GPU gpu,
+ const XGL_CHAR* pExtName)
+{
+}
+
+// Queue functions
+
+XGL_RESULT XGLAPI xglGetDeviceQueue(
+ XGL_DEVICE device,
+ XGL_QUEUE_TYPE queueType,
+ XGL_UINT queueIndex,
+ XGL_QUEUE* pQueue)
+{
+}
+
+XGL_RESULT XGLAPI xglQueueSubmit(
+ XGL_QUEUE queue,
+ XGL_UINT cmdBufferCount,
+ const XGL_CMD_BUFFER* pCmdBuffers,
+ XGL_UINT memRefCount,
+ const XGL_MEMORY_REF* pMemRefs,
+ XGL_FENCE fence)
+{
+}
+
+XGL_RESULT XGLAPI xglQueueSetGlobalMemReferences(
+ XGL_QUEUE queue,
+ XGL_UINT memRefCount,
+ const XGL_MEMORY_REF* pMemRefs)
+{
+}
+
+XGL_RESULT XGLAPI xglQueueWaitIdle(
+ XGL_QUEUE queue)
+{
+}
+
+XGL_RESULT XGLAPI xglDeviceWaitIdle(
+ XGL_DEVICE device)
+{
+}
+
+// Memory functions
+
+XGL_RESULT XGLAPI xglGetMemoryHeapCount(
+ XGL_DEVICE device,
+ XGL_UINT* pCount)
+{
+}
+
+XGL_RESULT XGLAPI xglGetMemoryHeapInfo(
+ XGL_DEVICE device,
+ XGL_UINT heapId,
+ XGL_MEMORY_HEAP_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+XGL_RESULT XGLAPI xglAllocMemory(
+ XGL_DEVICE device,
+ const XGL_MEMORY_ALLOC_INFO* pAllocInfo,
+ XGL_GPU_MEMORY* pMem)
+{
+}
+
+XGL_RESULT XGLAPI xglFreeMemory(
+ XGL_GPU_MEMORY mem)
+{
+}
+
+XGL_RESULT XGLAPI xglSetMemoryPriority(
+ XGL_GPU_MEMORY mem,
+ XGL_MEMORY_PRIORITY priority)
+{
+}
+
+XGL_RESULT XGLAPI xglMapMemory(
+ XGL_GPU_MEMORY mem,
+ XGL_FLAGS flags, // Reserved
+ XGL_VOID** ppData)
+{
+}
+
+XGL_RESULT XGLAPI xglUnmapMemory(
+ XGL_GPU_MEMORY mem)
+{
+}
+
+XGL_RESULT XGLAPI xglPinSystemMemory(
+ XGL_DEVICE device,
+ const XGL_VOID* pSysMem,
+ XGL_SIZE memSize,
+ XGL_GPU_MEMORY* pMem)
+{
+}
+
+XGL_RESULT XGLAPI xglRemapVirtualMemoryPages(
+ XGL_DEVICE device,
+ XGL_UINT rangeCount,
+ const XGL_VIRTUAL_MEMORY_REMAP_RANGE* pRanges,
+ XGL_UINT preWaitSemaphoreCount,
+ const XGL_QUEUE_SEMAPHORE* pPreWaitSemaphores,
+ XGL_UINT postSignalSemaphoreCount,
+ const XGL_QUEUE_SEMAPHORE* pPostSignalSemaphores)
+{
+}
+
+// Multi-device functions
+
+XGL_RESULT XGLAPI xglGetMultiGpuCompatibility(
+ XGL_PHYSICAL_GPU gpu0,
+ XGL_PHYSICAL_GPU gpu1,
+ XGL_GPU_COMPATIBILITY_INFO* pInfo)
+{
+}
+
+XGL_RESULT XGLAPI xglOpenSharedMemory(
+ XGL_DEVICE device,
+ const XGL_MEMORY_OPEN_INFO* pOpenInfo,
+ XGL_GPU_MEMORY* pMem)
+{
+}
+
+XGL_RESULT XGLAPI xglOpenSharedQueueSemaphore(
+ XGL_DEVICE device,
+ const XGL_QUEUE_SEMAPHORE_OPEN_INFO* pOpenInfo,
+ XGL_QUEUE_SEMAPHORE* pSemaphore)
+{
+}
+
+XGL_RESULT XGLAPI xglOpenPeerMemory(
+ XGL_DEVICE device,
+ const XGL_PEER_MEMORY_OPEN_INFO* pOpenInfo,
+ XGL_GPU_MEMORY* pMem)
+{
+}
+
+XGL_RESULT XGLAPI xglOpenPeerImage(
+ XGL_DEVICE device,
+ const XGL_PEER_IMAGE_OPEN_INFO* pOpenInfo,
+ XGL_IMAGE* pImage,
+ XGL_GPU_MEMORY* pMem)
+{
+}
+
+// Generic API object functions
+
+XGL_RESULT XGLAPI xglDestroyObject(
+ XGL_OBJECT object)
+{
+}
+
+XGL_RESULT XGLAPI xglGetObjectInfo(
+ XGL_BASE_OBJECT object,
+ XGL_OBJECT_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+XGL_RESULT XGLAPI xglBindObjectMemory(
+ XGL_OBJECT object,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset)
+{
+}
+
+// Fence functions
+
+XGL_RESULT XGLAPI xglCreateFence(
+ XGL_DEVICE device,
+ const XGL_FENCE_CREATE_INFO* pCreateInfo,
+ XGL_FENCE* pFence)
+{
+}
+
+XGL_RESULT XGLAPI xglGetFenceStatus(
+ XGL_FENCE fence)
+{
+}
+
+XGL_RESULT XGLAPI xglWaitForFences(
+ XGL_DEVICE device,
+ XGL_UINT fenceCount,
+ const XGL_FENCE* pFences,
+ XGL_BOOL waitAll,
+ XGL_UINT64 timeout)
+{
+}
+
+// Queue semaphore functions
+
+XGL_RESULT XGLAPI xglCreateQueueSemaphore(
+ XGL_DEVICE device,
+ const XGL_QUEUE_SEMAPHORE_CREATE_INFO* pCreateInfo,
+ XGL_QUEUE_SEMAPHORE* pSemaphore)
+{
+}
+
+XGL_RESULT XGLAPI xglSignalQueueSemaphore(
+ XGL_QUEUE queue,
+ XGL_QUEUE_SEMAPHORE semaphore)
+{
+}
+
+XGL_RESULT XGLAPI xglWaitQueueSemaphore(
+ XGL_QUEUE queue,
+ XGL_QUEUE_SEMAPHORE semaphore)
+{
+}
+
+// Event functions
+
+XGL_RESULT XGLAPI xglCreateEvent(
+ XGL_DEVICE device,
+ const XGL_EVENT_CREATE_INFO* pCreateInfo,
+ XGL_EVENT* pEvent)
+{
+}
+
+XGL_RESULT XGLAPI xglGetEventStatus(
+ XGL_EVENT event)
+{
+}
+
+XGL_RESULT XGLAPI xglSetEvent(
+ XGL_EVENT event)
+{
+}
+
+XGL_RESULT XGLAPI xglResetEvent(
+ XGL_EVENT event)
+{
+}
+
+// Query functions
+
+XGL_RESULT XGLAPI xglCreateQueryPool(
+ XGL_DEVICE device,
+ const XGL_QUERY_POOL_CREATE_INFO* pCreateInfo,
+ XGL_QUERY_POOL* pQueryPool)
+{
+}
+
+XGL_RESULT XGLAPI xglGetQueryPoolResults(
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT startQuery,
+ XGL_UINT queryCount,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+// Format capabilities
+
+XGL_RESULT XGLAPI xglGetFormatInfo(
+ XGL_DEVICE device,
+ XGL_FORMAT format,
+ XGL_FORMAT_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+// Image functions
+
+XGL_RESULT XGLAPI xglCreateImage(
+ XGL_DEVICE device,
+ const XGL_IMAGE_CREATE_INFO* pCreateInfo,
+ XGL_IMAGE* pImage)
+{
+}
+
+XGL_RESULT XGLAPI xglGetImageSubresourceInfo(
+ XGL_IMAGE image,
+ const XGL_IMAGE_SUBRESOURCE* pSubresource,
+ XGL_SUBRESOURCE_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+// Image view functions
+
+XGL_RESULT XGLAPI xglCreateImageView(
+ XGL_DEVICE device,
+ const XGL_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_IMAGE_VIEW* pView)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateColorAttachmentView(
+ XGL_DEVICE device,
+ const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_COLOR_ATTACHMENT_VIEW* pView)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateDepthStencilView(
+ XGL_DEVICE device,
+ const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_DEPTH_STENCIL_VIEW* pView)
+{
+}
+
+// Shader functions
+
+XGL_RESULT XGLAPI xglCreateShader(
+ XGL_DEVICE device,
+ const XGL_SHADER_CREATE_INFO* pCreateInfo,
+ XGL_SHADER* pShader)
+{
+}
+
+// Pipeline functions
+
+XGL_RESULT XGLAPI xglCreateGraphicsPipeline(
+ XGL_DEVICE device,
+ const XGL_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
+ XGL_PIPELINE* pPipeline)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateComputePipeline(
+ XGL_DEVICE device,
+ const XGL_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo,
+ XGL_PIPELINE* pPipeline)
+{
+}
+
+XGL_RESULT XGLAPI xglStorePipeline(
+ XGL_PIPELINE pipeline,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData)
+{
+}
+
+XGL_RESULT XGLAPI xglLoadPipeline(
+ XGL_DEVICE device,
+ XGL_SIZE dataSize,
+ const XGL_VOID* pData,
+ XGL_PIPELINE* pPipeline)
+{
+}
+
+XGL_RESULT XGLAPI xglCreatePipelineDelta(
+ XGL_DEVICE device,
+ XGL_PIPELINE p1,
+ XGL_PIPELINE p2,
+ XGL_PIPELINE_DELTA* delta)
+{
+}
+
+// Sampler functions
+
+XGL_RESULT XGLAPI xglCreateSampler(
+ XGL_DEVICE device,
+ const XGL_SAMPLER_CREATE_INFO* pCreateInfo,
+ XGL_SAMPLER* pSampler)
+{
+}
+
+// Descriptor set functions
+
+XGL_RESULT XGLAPI xglCreateDescriptorSet(
+ XGL_DEVICE device,
+ const XGL_DESCRIPTOR_SET_CREATE_INFO* pCreateInfo,
+ XGL_DESCRIPTOR_SET* pDescriptorSet)
+{
+}
+
+XGL_VOID XGLAPI xglBeginDescriptorSetUpdate(
+ XGL_DESCRIPTOR_SET descriptorSet)
+{
+}
+
+XGL_VOID XGLAPI xglEndDescriptorSetUpdate(
+ XGL_DESCRIPTOR_SET descriptorSet)
+{
+}
+
+XGL_VOID XGLAPI xglAttachSamplerDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_SAMPLER* pSamplers)
+{
+}
+
+XGL_VOID XGLAPI xglAttachImageViewDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_IMAGE_VIEW_ATTACH_INFO* pImageViews)
+{
+}
+
+XGL_VOID XGLAPI xglAttachMemoryViewDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemViews)
+{
+}
+
+XGL_VOID XGLAPI xglAttachNestedDescriptors(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_DESCRIPTOR_SET_ATTACH_INFO* pNestedDescriptorSets)
+{
+}
+
+XGL_VOID XGLAPI xglClearDescriptorSetSlots(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount)
+{
+}
+
+// State object functions
+
+XGL_RESULT XGLAPI xglCreateViewportState(
+ XGL_DEVICE device,
+ const XGL_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
+ XGL_VIEWPORT_STATE_OBJECT* pState)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateRasterState(
+ XGL_DEVICE device,
+ const XGL_RASTER_STATE_CREATE_INFO* pCreateInfo,
+ XGL_RASTER_STATE_OBJECT* pState)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateMsaaState(
+ XGL_DEVICE device,
+ const XGL_MSAA_STATE_CREATE_INFO* pCreateInfo,
+ XGL_MSAA_STATE_OBJECT* pState)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateColorBlendState(
+ XGL_DEVICE device,
+ const XGL_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
+ XGL_COLOR_BLEND_STATE_OBJECT* pState)
+{
+}
+
+XGL_RESULT XGLAPI xglCreateDepthStencilState(
+ XGL_DEVICE device,
+ const XGL_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
+ XGL_DEPTH_STENCIL_STATE_OBJECT* pState)
+{
+}
+
+// Command buffer functions
+
+XGL_RESULT XGLAPI xglCreateCommandBuffer(
+ XGL_DEVICE device,
+ const XGL_CMD_BUFFER_CREATE_INFO* pCreateInfo,
+ XGL_CMD_BUFFER* pCmdBuffer)
+{
+}
+
+XGL_RESULT XGLAPI xglBeginCommandBuffer(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_FLAGS flags)
+{
+} // XGL_CMD_BUFFER_BUILD_FLAGS
+
+XGL_RESULT XGLAPI xglEndCommandBuffer(
+ XGL_CMD_BUFFER cmdBuffer)
+{
+}
+
+XGL_RESULT XGLAPI xglResetCommandBuffer(
+ XGL_CMD_BUFFER cmdBuffer)
+{
+}
+
+// Command buffer building functions
+
+// NOTE(review): every xglCmd* body below is an intentional no-op stub;
+// command recording is not implemented yet in this layer. The XGL_VOID
+// return type makes the empty bodies well-defined C.
+XGL_VOID XGLAPI xglCmdBindPipeline(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_PIPELINE pipeline)
+{
+}
+
+XGL_VOID XGLAPI xglCmdBindPipelineDelta(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_PIPELINE_DELTA delta)
+{
+}
+
+XGL_VOID XGLAPI xglCmdBindStateObject(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_STATE_BIND_POINT stateBindPoint,
+ XGL_STATE_OBJECT state)
+{
+}
+
+XGL_VOID XGLAPI xglCmdBindDescriptorSet(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT index,
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT slotOffset)
+{
+}
+
+XGL_VOID XGLAPI xglCmdBindDynamicMemoryView(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemView)
+{
+}
+
+XGL_VOID XGLAPI xglCmdBindIndexData(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_INDEX_TYPE indexType)
+{
+}
+
+XGL_VOID XGLAPI xglCmdBindAttachments(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT colorAttachmentCount,
+ const XGL_COLOR_ATTACHMENT_BIND_INFO* pColorAttachments,
+ const XGL_DEPTH_STENCIL_BIND_INFO* pDepthStencilAttachment)
+{
+}
+
+XGL_VOID XGLAPI xglCmdPrepareMemoryRegions(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT transitionCount,
+ const XGL_MEMORY_STATE_TRANSITION* pStateTransitions)
+{
+}
+
+XGL_VOID XGLAPI xglCmdPrepareImages(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT transitionCount,
+ const XGL_IMAGE_STATE_TRANSITION* pStateTransitions)
+{
+}
+
+XGL_VOID XGLAPI xglCmdDraw(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT firstVertex,
+ XGL_UINT vertexCount,
+ XGL_UINT firstInstance,
+ XGL_UINT instanceCount)
+{
+}
+
+XGL_VOID XGLAPI xglCmdDrawIndexed(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT firstIndex,
+ XGL_UINT indexCount,
+ XGL_INT vertexOffset,
+ XGL_UINT firstInstance,
+ XGL_UINT instanceCount)
+{
+}
+
+XGL_VOID XGLAPI xglCmdDrawIndirect(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_UINT32 count,
+ XGL_UINT32 stride)
+{
+}
+
+XGL_VOID XGLAPI xglCmdDrawIndexedIndirect(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_UINT32 count,
+ XGL_UINT32 stride)
+{
+}
+
+XGL_VOID XGLAPI xglCmdDispatch(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT x,
+ XGL_UINT y,
+ XGL_UINT z)
+{
+}
+
+XGL_VOID XGLAPI xglCmdDispatchIndirect(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset)
+{
+}
+
+// NOTE(review): the copy/clear/event/atomic recording stubs below are
+// intentional no-ops; void return type keeps the empty bodies valid C.
+XGL_VOID XGLAPI xglCmdCopyMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY srcMem,
+ XGL_GPU_MEMORY destMem,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_COPY* pRegions)
+{
+}
+
+XGL_VOID XGLAPI xglCmdCopyImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE destImage,
+ XGL_UINT regionCount,
+ const XGL_IMAGE_COPY* pRegions)
+{
+}
+
+XGL_VOID XGLAPI xglCmdCopyMemoryToImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY srcMem,
+ XGL_IMAGE destImage,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_IMAGE_COPY* pRegions)
+{
+}
+
+XGL_VOID XGLAPI xglCmdCopyImageToMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_GPU_MEMORY destMem,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_IMAGE_COPY* pRegions)
+{
+}
+
+XGL_VOID XGLAPI xglCmdCloneImageData(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE_STATE srcImageState,
+ XGL_IMAGE destImage,
+ XGL_IMAGE_STATE destImageState)
+{
+}
+
+XGL_VOID XGLAPI xglCmdUpdateMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_GPU_SIZE dataSize,
+ const XGL_UINT32* pData)
+{
+}
+
+XGL_VOID XGLAPI xglCmdFillMemory(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_GPU_SIZE fillSize,
+ XGL_UINT32 data)
+{
+}
+
+// Clear color given as floats; the Raw variant below takes the color
+// as four uninterpreted 32-bit values instead (see signatures).
+XGL_VOID XGLAPI xglCmdClearColorImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ const XGL_FLOAT color[4],
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges)
+{
+}
+
+XGL_VOID XGLAPI xglCmdClearColorImageRaw(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ const XGL_UINT32 color[4],
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges)
+{
+}
+
+XGL_VOID XGLAPI xglCmdClearDepthStencil(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ XGL_FLOAT depth,
+ XGL_UINT32 stencil,
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges)
+{
+}
+
+XGL_VOID XGLAPI xglCmdResolveImage(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE destImage,
+ XGL_UINT rectCount,
+ const XGL_IMAGE_RESOLVE* pRects)
+{
+}
+
+XGL_VOID XGLAPI xglCmdSetEvent(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_EVENT event)
+{
+}
+
+XGL_VOID XGLAPI xglCmdResetEvent(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_EVENT event)
+{
+}
+
+XGL_VOID XGLAPI xglCmdMemoryAtomic(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_UINT64 srcData,
+ XGL_ATOMIC_OP atomicOp)
+{
+}
+
+// NOTE(review): query/timestamp/atomic-counter recording stubs below
+// are intentional no-ops until the device-independent layer is built.
+XGL_VOID XGLAPI xglCmdBeginQuery(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT slot,
+ XGL_FLAGS flags)
+{
+}
+
+XGL_VOID XGLAPI xglCmdEndQuery(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT slot)
+{
+}
+
+XGL_VOID XGLAPI xglCmdResetQueryPool(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT startQuery,
+ XGL_UINT queryCount)
+{
+}
+
+XGL_VOID XGLAPI xglCmdWriteTimestamp(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_TIMESTAMP_TYPE timestampType,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset)
+{
+}
+
+XGL_VOID XGLAPI xglCmdInitAtomicCounters(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ const XGL_UINT32* pData)
+{
+}
+
+XGL_VOID XGLAPI xglCmdLoadAtomicCounters(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ XGL_GPU_MEMORY srcMem,
+ XGL_GPU_SIZE srcOffset)
+{
+}
+
+XGL_VOID XGLAPI xglCmdSaveAtomicCounters(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset)
+{
+}
+
diff --git a/main/xglapi.h b/main/xglapi.h
new file mode 100644
index 0000000..1d4407a
--- /dev/null
+++ b/main/xglapi.h
@@ -0,0 +1,833 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2012-2013 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Courtney Goeltzenleuchter <courtney@lunarg.com>
+ */
+
+#ifndef XGLAPI_INCLUDED
+#define XGLAPI_INCLUDED
+#include "xgl.h"
+
+/**
+ * A generic function pointer type, used to pass entry points around
+ * without committing to a specific signature.
+ */
+typedef void (*_XGLProc)(void);
+
+/**
+ * Typedefs for all XGL API entrypoint functions.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+// GPU initialization
+
+typedef XGL_RESULT XGLAPI (*InitAndEnumerateGpus_t)(
+ const XGL_APPLICATION_INFO* pAppInfo,
+ const XGL_ALLOC_CALLBACKS* pAllocCb,
+ XGL_UINT maxGpus,
+ XGL_UINT* pGpuCount,
+ XGL_PHYSICAL_GPU* pGpus);
+
+typedef XGL_RESULT XGLAPI (*GetGpuInfo_t)(
+ XGL_PHYSICAL_GPU gpu,
+ XGL_PHYSICAL_GPU_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+// Device functions
+
+typedef XGL_RESULT XGLAPI (*CreateDevice_t)(
+ XGL_PHYSICAL_GPU gpu,
+ const XGL_DEVICE_CREATE_INFO* pCreateInfo,
+ XGL_DEVICE* pDevice);
+
+typedef XGL_RESULT XGLAPI (*DestroyDevice_t)(
+ XGL_DEVICE device);
+
+// Extension discovery functions
+
+typedef XGL_RESULT XGLAPI (*GetExtensionSupport_t)(
+ XGL_PHYSICAL_GPU gpu,
+ const XGL_CHAR* pExtName);
+
+// Queue functions
+
+typedef XGL_RESULT XGLAPI (*GetDeviceQueue_t)(
+ XGL_DEVICE device,
+ XGL_QUEUE_TYPE queueType,
+ XGL_UINT queueIndex,
+ XGL_QUEUE* pQueue);
+
+typedef XGL_RESULT XGLAPI (*QueueSubmit_t)(
+ XGL_QUEUE queue,
+ XGL_UINT cmdBufferCount,
+ const XGL_CMD_BUFFER* pCmdBuffers,
+ XGL_UINT memRefCount,
+ const XGL_MEMORY_REF* pMemRefs,
+ XGL_FENCE fence);
+
+typedef XGL_RESULT XGLAPI (*QueueSetGlobalMemReferences_t)(
+ XGL_QUEUE queue,
+ XGL_UINT memRefCount,
+ const XGL_MEMORY_REF* pMemRefs);
+
+typedef XGL_RESULT XGLAPI (*QueueWaitIdle_t)(
+ XGL_QUEUE queue);
+
+typedef XGL_RESULT XGLAPI (*DeviceWaitIdle_t)(
+ XGL_DEVICE device);
+
+// Memory functions
+
+typedef XGL_RESULT XGLAPI (*GetMemoryHeapCount_t)(
+ XGL_DEVICE device,
+ XGL_UINT* pCount);
+
+typedef XGL_RESULT XGLAPI (*GetMemoryHeapInfo_t)(
+ XGL_DEVICE device,
+ XGL_UINT heapId,
+ XGL_MEMORY_HEAP_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+typedef XGL_RESULT XGLAPI (*AllocMemory_t)(
+ XGL_DEVICE device,
+ const XGL_MEMORY_ALLOC_INFO* pAllocInfo,
+ XGL_GPU_MEMORY* pMem);
+
+typedef XGL_RESULT XGLAPI (*FreeMemory_t)(
+ XGL_GPU_MEMORY mem);
+
+typedef XGL_RESULT XGLAPI (*SetMemoryPriority_t)(
+ XGL_GPU_MEMORY mem,
+ XGL_MEMORY_PRIORITY priority);
+
+typedef XGL_RESULT XGLAPI (*MapMemory_t)(
+ XGL_GPU_MEMORY mem,
+ XGL_FLAGS flags, // Reserved
+ XGL_VOID** ppData);
+
+typedef XGL_RESULT XGLAPI (*UnmapMemory_t)(
+ XGL_GPU_MEMORY mem);
+
+typedef XGL_RESULT XGLAPI (*PinSystemMemory_t)(
+ XGL_DEVICE device,
+ const XGL_VOID* pSysMem,
+ XGL_SIZE memSize,
+ XGL_GPU_MEMORY* pMem);
+
+typedef XGL_RESULT XGLAPI (*RemapVirtualMemoryPages_t)(
+ XGL_DEVICE device,
+ XGL_UINT rangeCount,
+ const XGL_VIRTUAL_MEMORY_REMAP_RANGE* pRanges,
+ XGL_UINT preWaitSemaphoreCount,
+ const XGL_QUEUE_SEMAPHORE* pPreWaitSemaphores,
+ XGL_UINT postSignalSemaphoreCount,
+ const XGL_QUEUE_SEMAPHORE* pPostSignalSemaphores);
+
+// Multi-device functions
+
+typedef XGL_RESULT XGLAPI (*GetMultiGpuCompatibility_t)(
+ XGL_PHYSICAL_GPU gpu0,
+ XGL_PHYSICAL_GPU gpu1,
+ XGL_GPU_COMPATIBILITY_INFO* pInfo);
+
+typedef XGL_RESULT XGLAPI (*OpenSharedMemory_t)(
+ XGL_DEVICE device,
+ const XGL_MEMORY_OPEN_INFO* pOpenInfo,
+ XGL_GPU_MEMORY* pMem);
+
+typedef XGL_RESULT XGLAPI (*OpenSharedQueueSemaphore_t)(
+ XGL_DEVICE device,
+ const XGL_QUEUE_SEMAPHORE_OPEN_INFO* pOpenInfo,
+ XGL_QUEUE_SEMAPHORE* pSemaphore);
+
+typedef XGL_RESULT XGLAPI (*OpenPeerMemory_t)(
+ XGL_DEVICE device,
+ const XGL_PEER_MEMORY_OPEN_INFO* pOpenInfo,
+ XGL_GPU_MEMORY* pMem);
+
+typedef XGL_RESULT XGLAPI (*OpenPeerImage_t)(
+ XGL_DEVICE device,
+ const XGL_PEER_IMAGE_OPEN_INFO* pOpenInfo,
+ XGL_IMAGE* pImage,
+ XGL_GPU_MEMORY* pMem);
+
+// Generic API object functions
+
+typedef XGL_RESULT XGLAPI (*DestroyObject_t)(
+ XGL_OBJECT object);
+
+typedef XGL_RESULT XGLAPI (*GetObjectInfo_t)(
+ XGL_BASE_OBJECT object,
+ XGL_OBJECT_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+typedef XGL_RESULT XGLAPI (*BindObjectMemory_t)(
+ XGL_OBJECT object,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset);
+
+// Fence functions
+
+typedef XGL_RESULT XGLAPI (*CreateFence_t)(
+ XGL_DEVICE device,
+ const XGL_FENCE_CREATE_INFO* pCreateInfo,
+ XGL_FENCE* pFence);
+
+typedef XGL_RESULT XGLAPI (*GetFenceStatus_t)(
+ XGL_FENCE fence);
+
+typedef XGL_RESULT XGLAPI (*WaitForFences_t)(
+ XGL_DEVICE device,
+ XGL_UINT fenceCount,
+ const XGL_FENCE* pFences,
+ XGL_BOOL waitAll,
+ XGL_UINT64 timeout);
+
+// Queue semaphore functions
+
+typedef XGL_RESULT XGLAPI (*CreateQueueSemaphore_t)(
+ XGL_DEVICE device,
+ const XGL_QUEUE_SEMAPHORE_CREATE_INFO* pCreateInfo,
+ XGL_QUEUE_SEMAPHORE* pSemaphore);
+
+typedef XGL_RESULT XGLAPI (*SignalQueueSemaphore_t)(
+ XGL_QUEUE queue,
+ XGL_QUEUE_SEMAPHORE semaphore);
+
+typedef XGL_RESULT XGLAPI (*WaitQueueSemaphore_t)(
+ XGL_QUEUE queue,
+ XGL_QUEUE_SEMAPHORE semaphore);
+
+// Event functions
+
+typedef XGL_RESULT XGLAPI (*CreateEvent_t)(
+ XGL_DEVICE device,
+ const XGL_EVENT_CREATE_INFO* pCreateInfo,
+ XGL_EVENT* pEvent);
+
+typedef XGL_RESULT XGLAPI (*GetEventStatus_t)(
+ XGL_EVENT event);
+
+typedef XGL_RESULT XGLAPI (*SetEvent_t)(
+ XGL_EVENT event);
+
+typedef XGL_RESULT XGLAPI (*ResetEvent_t)(
+ XGL_EVENT event);
+
+// Query functions
+
+typedef XGL_RESULT XGLAPI (*CreateQueryPool_t)(
+ XGL_DEVICE device,
+ const XGL_QUERY_POOL_CREATE_INFO* pCreateInfo,
+ XGL_QUERY_POOL* pQueryPool);
+
+typedef XGL_RESULT XGLAPI (*GetQueryPoolResults_t)(
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT startQuery,
+ XGL_UINT queryCount,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+// Format capabilities
+
+typedef XGL_RESULT XGLAPI (*GetFormatInfo_t)(
+ XGL_DEVICE device,
+ XGL_FORMAT format,
+ XGL_FORMAT_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+// Image functions
+
+typedef XGL_RESULT XGLAPI (*CreateImage_t)(
+ XGL_DEVICE device,
+ const XGL_IMAGE_CREATE_INFO* pCreateInfo,
+ XGL_IMAGE* pImage);
+
+typedef XGL_RESULT XGLAPI (*GetImageSubresourceInfo_t)(
+ XGL_IMAGE image,
+ const XGL_IMAGE_SUBRESOURCE* pSubresource,
+ XGL_SUBRESOURCE_INFO_TYPE infoType,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+// Image view functions
+
+typedef XGL_RESULT XGLAPI (*CreateImageView_t)(
+ XGL_DEVICE device,
+ const XGL_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_IMAGE_VIEW* pView);
+
+typedef XGL_RESULT XGLAPI (*CreateColorAttachmentView_t)(
+ XGL_DEVICE device,
+ const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_COLOR_ATTACHMENT_VIEW* pView);
+
+typedef XGL_RESULT XGLAPI (*CreateDepthStencilView_t)(
+ XGL_DEVICE device,
+ const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
+ XGL_DEPTH_STENCIL_VIEW* pView);
+
+// Shader functions
+
+typedef XGL_RESULT XGLAPI (*CreateShader_t)(
+ XGL_DEVICE device,
+ const XGL_SHADER_CREATE_INFO* pCreateInfo,
+ XGL_SHADER* pShader);
+
+// Pipeline functions
+
+typedef XGL_RESULT XGLAPI (*CreateGraphicsPipeline_t)(
+ XGL_DEVICE device,
+ const XGL_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
+ XGL_PIPELINE* pPipeline);
+
+typedef XGL_RESULT XGLAPI (*CreateComputePipeline_t)(
+ XGL_DEVICE device,
+ const XGL_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo,
+ XGL_PIPELINE* pPipeline);
+
+typedef XGL_RESULT XGLAPI (*StorePipeline_t)(
+ XGL_PIPELINE pipeline,
+ XGL_SIZE* pDataSize,
+ XGL_VOID* pData);
+
+typedef XGL_RESULT XGLAPI (*LoadPipeline_t)(
+ XGL_DEVICE device,
+ XGL_SIZE dataSize,
+ const XGL_VOID* pData,
+ XGL_PIPELINE* pPipeline);
+
+typedef XGL_RESULT XGLAPI (*CreatePipelineDelta_t)(
+ XGL_DEVICE device,
+ XGL_PIPELINE p1,
+ XGL_PIPELINE p2,
+ XGL_PIPELINE_DELTA* delta);
+
+// Sampler functions
+
+typedef XGL_RESULT XGLAPI (*CreateSampler_t)(
+ XGL_DEVICE device,
+ const XGL_SAMPLER_CREATE_INFO* pCreateInfo,
+ XGL_SAMPLER* pSampler);
+
+// Descriptor set functions
+
+typedef XGL_RESULT XGLAPI (*CreateDescriptorSet_t)(
+ XGL_DEVICE device,
+ const XGL_DESCRIPTOR_SET_CREATE_INFO* pCreateInfo,
+ XGL_DESCRIPTOR_SET* pDescriptorSet);
+
+typedef XGL_VOID XGLAPI (*BeginDescriptorSetUpdate_t)(
+ XGL_DESCRIPTOR_SET descriptorSet);
+
+typedef XGL_VOID XGLAPI (*EndDescriptorSetUpdate_t)(
+ XGL_DESCRIPTOR_SET descriptorSet);
+
+typedef XGL_VOID XGLAPI (*AttachSamplerDescriptors_t)(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_SAMPLER* pSamplers);
+
+typedef XGL_VOID XGLAPI (*AttachImageViewDescriptors_t)(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_IMAGE_VIEW_ATTACH_INFO* pImageViews);
+
+typedef XGL_VOID XGLAPI (*AttachMemoryViewDescriptors_t)(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemViews);
+
+typedef XGL_VOID XGLAPI (*AttachNestedDescriptors_t)(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount,
+ const XGL_DESCRIPTOR_SET_ATTACH_INFO* pNestedDescriptorSets);
+
+typedef XGL_VOID XGLAPI (*ClearDescriptorSetSlots_t)(
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT startSlot,
+ XGL_UINT slotCount);
+
+// State object functions
+
+typedef XGL_RESULT XGLAPI (*CreateViewportState_t)(
+ XGL_DEVICE device,
+ const XGL_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
+ XGL_VIEWPORT_STATE_OBJECT* pState);
+
+typedef XGL_RESULT XGLAPI (*CreateRasterState_t)(
+ XGL_DEVICE device,
+ const XGL_RASTER_STATE_CREATE_INFO* pCreateInfo,
+ XGL_RASTER_STATE_OBJECT* pState);
+
+typedef XGL_RESULT XGLAPI (*CreateMsaaState_t)(
+ XGL_DEVICE device,
+ const XGL_MSAA_STATE_CREATE_INFO* pCreateInfo,
+ XGL_MSAA_STATE_OBJECT* pState);
+
+typedef XGL_RESULT XGLAPI (*CreateColorBlendState_t)(
+ XGL_DEVICE device,
+ const XGL_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
+ XGL_COLOR_BLEND_STATE_OBJECT* pState);
+
+typedef XGL_RESULT XGLAPI (*CreateDepthStencilState_t)(
+ XGL_DEVICE device,
+ const XGL_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
+ XGL_DEPTH_STENCIL_STATE_OBJECT* pState);
+
+// Command buffer functions
+
+typedef XGL_RESULT XGLAPI (*CreateCommandBuffer_t)(
+ XGL_DEVICE device,
+ const XGL_CMD_BUFFER_CREATE_INFO* pCreateInfo,
+ XGL_CMD_BUFFER* pCmdBuffer);
+
+typedef XGL_RESULT XGLAPI (*BeginCommandBuffer_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_FLAGS flags); // XGL_CMD_BUFFER_BUILD_FLAGS
+
+typedef XGL_RESULT XGLAPI (*EndCommandBuffer_t)(
+ XGL_CMD_BUFFER cmdBuffer);
+
+typedef XGL_RESULT XGLAPI (*ResetCommandBuffer_t)(
+ XGL_CMD_BUFFER cmdBuffer);
+
+// Command buffer building functions
+
+typedef XGL_VOID XGLAPI (*CmdBindPipeline_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_PIPELINE pipeline);
+
+typedef XGL_VOID XGLAPI (*CmdBindPipelineDelta_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_PIPELINE_DELTA delta);
+
+typedef XGL_VOID XGLAPI (*CmdBindStateObject_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_STATE_BIND_POINT stateBindPoint,
+ XGL_STATE_OBJECT state);
+
+typedef XGL_VOID XGLAPI (*CmdBindDescriptorSet_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT index,
+ XGL_DESCRIPTOR_SET descriptorSet,
+ XGL_UINT slotOffset);
+
+typedef XGL_VOID XGLAPI (*CmdBindDynamicMemoryView_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ const XGL_MEMORY_VIEW_ATTACH_INFO* pMemView);
+
+typedef XGL_VOID XGLAPI (*CmdBindIndexData_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_INDEX_TYPE indexType);
+
+typedef XGL_VOID XGLAPI (*CmdBindAttachments_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT colorAttachmentCount,
+ const XGL_COLOR_ATTACHMENT_BIND_INFO* pColorAttachments,
+ const XGL_DEPTH_STENCIL_BIND_INFO* pDepthStencilAttachment);
+
+typedef XGL_VOID XGLAPI (*CmdPrepareMemoryRegions_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT transitionCount,
+ const XGL_MEMORY_STATE_TRANSITION* pStateTransitions);
+
+typedef XGL_VOID XGLAPI (*CmdPrepareImages_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT transitionCount,
+ const XGL_IMAGE_STATE_TRANSITION* pStateTransitions);
+
+typedef XGL_VOID XGLAPI (*CmdDraw_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT firstVertex,
+ XGL_UINT vertexCount,
+ XGL_UINT firstInstance,
+ XGL_UINT instanceCount);
+
+typedef XGL_VOID XGLAPI (*CmdDrawIndexed_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT firstIndex,
+ XGL_UINT indexCount,
+ XGL_INT vertexOffset,
+ XGL_UINT firstInstance,
+ XGL_UINT instanceCount);
+
+typedef XGL_VOID XGLAPI (*CmdDrawIndirect_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_UINT32 count,
+ XGL_UINT32 stride);
+
+typedef XGL_VOID XGLAPI (*CmdDrawIndexedIndirect_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset,
+ XGL_UINT32 count,
+ XGL_UINT32 stride);
+
+typedef XGL_VOID XGLAPI (*CmdDispatch_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_UINT x,
+ XGL_UINT y,
+ XGL_UINT z);
+
+typedef XGL_VOID XGLAPI (*CmdDispatchIndirect_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY mem,
+ XGL_GPU_SIZE offset);
+
+typedef XGL_VOID XGLAPI (*CmdCopyMemory_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY srcMem,
+ XGL_GPU_MEMORY destMem,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_COPY* pRegions);
+
+typedef XGL_VOID XGLAPI (*CmdCopyImage_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE destImage,
+ XGL_UINT regionCount,
+ const XGL_IMAGE_COPY* pRegions);
+
+typedef XGL_VOID XGLAPI (*CmdCopyMemoryToImage_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY srcMem,
+ XGL_IMAGE destImage,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_IMAGE_COPY* pRegions);
+
+typedef XGL_VOID XGLAPI (*CmdCopyImageToMemory_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_GPU_MEMORY destMem,
+ XGL_UINT regionCount,
+ const XGL_MEMORY_IMAGE_COPY* pRegions);
+
+typedef XGL_VOID XGLAPI (*CmdCloneImageData_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE_STATE srcImageState,
+ XGL_IMAGE destImage,
+ XGL_IMAGE_STATE destImageState);
+
+typedef XGL_VOID XGLAPI (*CmdUpdateMemory_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_GPU_SIZE dataSize,
+ const XGL_UINT32* pData);
+
+typedef XGL_VOID XGLAPI (*CmdFillMemory_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_GPU_SIZE fillSize,
+ XGL_UINT32 data);
+
+// NOTE(review): for calling-convention macros the conventional form is
+// `typedef XGL_VOID (XGLAPI *CmdClearColorImage_t)(...)`; confirm how
+// XGLAPI expands on Windows builds.
+typedef XGL_VOID XGLAPI (*CmdClearColorImage_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ const XGL_FLOAT color[4],
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+typedef XGL_VOID XGLAPI (*CmdClearColorImageRaw_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ const XGL_UINT32 color[4],
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+typedef XGL_VOID XGLAPI (*CmdClearDepthStencil_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE image,
+ XGL_FLOAT depth,
+ XGL_UINT32 stencil,
+ XGL_UINT rangeCount,
+ const XGL_IMAGE_SUBRESOURCE_RANGE* pRanges);
+
+typedef XGL_VOID XGLAPI (*CmdResolveImage_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_IMAGE srcImage,
+ XGL_IMAGE destImage,
+ XGL_UINT rectCount,
+ const XGL_IMAGE_RESOLVE* pRects);
+
+typedef XGL_VOID XGLAPI (*CmdSetEvent_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_EVENT event);
+
+typedef XGL_VOID XGLAPI (*CmdResetEvent_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_EVENT event);
+
+typedef XGL_VOID XGLAPI (*CmdMemoryAtomic_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset,
+ XGL_UINT64 srcData,
+ XGL_ATOMIC_OP atomicOp);
+
+typedef XGL_VOID XGLAPI (*CmdBeginQuery_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT slot,
+ XGL_FLAGS flags);
+
+typedef XGL_VOID XGLAPI (*CmdEndQuery_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT slot);
+
+typedef XGL_VOID XGLAPI (*CmdResetQueryPool_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_QUERY_POOL queryPool,
+ XGL_UINT startQuery,
+ XGL_UINT queryCount);
+
+typedef XGL_VOID XGLAPI (*CmdWriteTimestamp_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_TIMESTAMP_TYPE timestampType,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset);
+
+typedef XGL_VOID XGLAPI (*CmdInitAtomicCounters_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ const XGL_UINT32* pData);
+
+typedef XGL_VOID XGLAPI (*CmdLoadAtomicCounters_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ XGL_GPU_MEMORY srcMem,
+ XGL_GPU_SIZE srcOffset);
+
+typedef XGL_VOID XGLAPI (*CmdSaveAtomicCounters_t)(
+ XGL_CMD_BUFFER cmdBuffer,
+ XGL_PIPELINE_BIND_POINT pipelineBindPoint,
+ XGL_UINT startCounter,
+ XGL_UINT counterCount,
+ XGL_GPU_MEMORY destMem,
+ XGL_GPU_SIZE destOffset);
+
+/**
+ * The API dispatcher jumps through these functions
+ */
+struct _xgl_api
+{
+ // One dispatch slot per xgl* entry point; the groups below mirror
+ // the typedef sections earlier in this header.
+ // GPU initialization
+
+ InitAndEnumerateGpus_t InitAndEnumerateGpus;
+ GetGpuInfo_t GetGpuInfo;
+
+ // Device functions
+ CreateDevice_t CreateDevice;
+ DestroyDevice_t DestroyDevice;
+
+ // Extension discovery functions
+
+ GetExtensionSupport_t GetExtensionSupport;
+
+ // Queue functions
+ GetDeviceQueue_t GetDeviceQueue;
+ QueueSubmit_t QueueSubmit;
+ QueueSetGlobalMemReferences_t QueueSetGlobalMemReferences;
+ QueueWaitIdle_t QueueWaitIdle;
+ DeviceWaitIdle_t DeviceWaitIdle;
+
+ // Memory functions
+
+ GetMemoryHeapCount_t GetMemoryHeapCount;
+ GetMemoryHeapInfo_t GetMemoryHeapInfo;
+ AllocMemory_t AllocMemory;
+ FreeMemory_t FreeMemory;
+ SetMemoryPriority_t SetMemoryPriority;
+ MapMemory_t MapMemory;
+ UnmapMemory_t UnmapMemory;
+ PinSystemMemory_t PinSystemMemory;
+ RemapVirtualMemoryPages_t RemapVirtualMemoryPages;
+
+ // Multi-device functions
+
+ GetMultiGpuCompatibility_t GetMultiGpuCompatibility;
+ OpenSharedMemory_t OpenSharedMemory;
+ OpenSharedQueueSemaphore_t OpenSharedQueueSemaphore;
+ OpenPeerMemory_t OpenPeerMemory;
+ OpenPeerImage_t OpenPeerImage;
+
+ // Generic API object functions
+
+ DestroyObject_t DestroyObject;
+ GetObjectInfo_t GetObjectInfo;
+ BindObjectMemory_t BindObjectMemory;
+
+ // Fence functions
+
+ CreateFence_t CreateFence;
+ GetFenceStatus_t GetFenceStatus;
+ WaitForFences_t WaitForFences;
+
+ // Queue semaphore functions
+
+ CreateQueueSemaphore_t CreateQueueSemaphore;
+ SignalQueueSemaphore_t SignalQueueSemaphore;
+ WaitQueueSemaphore_t WaitQueueSemaphore;
+
+ // Event functions
+
+ CreateEvent_t CreateEvent;
+ GetEventStatus_t GetEventStatus;
+ SetEvent_t SetEvent;
+ ResetEvent_t ResetEvent;
+
+ // Query functions
+
+ CreateQueryPool_t CreateQueryPool;
+ GetQueryPoolResults_t GetQueryPoolResults;
+
+ // Format capabilities
+
+ GetFormatInfo_t GetFormatInfo;
+
+ // Image functions
+
+ CreateImage_t CreateImage;
+ GetImageSubresourceInfo_t GetImageSubresourceInfo;
+
+ // Image view functions
+
+ CreateImageView_t CreateImageView;
+ CreateColorAttachmentView_t CreateColorAttachmentView;
+ CreateDepthStencilView_t CreateDepthStencilView;
+
+ // Shader functions
+
+ CreateShader_t CreateShader;
+
+ // Pipeline functions
+
+ CreateGraphicsPipeline_t CreateGraphicsPipeline;
+ CreateComputePipeline_t CreateComputePipeline;
+ StorePipeline_t StorePipeline;
+ LoadPipeline_t LoadPipeline;
+ CreatePipelineDelta_t CreatePipelineDelta;
+
+ // Sampler functions
+
+ CreateSampler_t CreateSampler;
+
+ // Descriptor set functions
+
+ CreateDescriptorSet_t CreateDescriptorSet;
+ BeginDescriptorSetUpdate_t BeginDescriptorSetUpdate;
+ EndDescriptorSetUpdate_t EndDescriptorSetUpdate;
+ AttachSamplerDescriptors_t AttachSamplerDescriptors;
+ AttachImageViewDescriptors_t AttachImageViewDescriptors;
+ AttachMemoryViewDescriptors_t AttachMemoryViewDescriptors;
+ AttachNestedDescriptors_t AttachNestedDescriptors;
+ ClearDescriptorSetSlots_t ClearDescriptorSetSlots;
+
+ // State object functions
+
+ CreateViewportState_t CreateViewportState;
+ CreateRasterState_t CreateRasterState;
+ CreateMsaaState_t CreateMsaaState;
+ CreateColorBlendState_t CreateColorBlendState;
+ CreateDepthStencilState_t CreateDepthStencilState;
+
+ // Command buffer functions
+
+ CreateCommandBuffer_t CreateCommandBuffer;
+ BeginCommandBuffer_t BeginCommandBuffer;
+ EndCommandBuffer_t EndCommandBuffer;
+ ResetCommandBuffer_t ResetCommandBuffer;
+
+ // Command buffer building functions
+
+ CmdBindPipeline_t CmdBindPipeline;
+ CmdBindPipelineDelta_t CmdBindPipelineDelta;
+ CmdBindStateObject_t CmdBindStateObject;
+ CmdBindDescriptorSet_t CmdBindDescriptorSet;
+ CmdBindDynamicMemoryView_t CmdBindDynamicMemoryView;
+ CmdBindIndexData_t CmdBindIndexData;
+ CmdBindAttachments_t CmdBindAttachments;
+ CmdPrepareMemoryRegions_t CmdPrepareMemoryRegions;
+ CmdPrepareImages_t CmdPrepareImages;
+ CmdDraw_t CmdDraw;
+ CmdDrawIndexed_t CmdDrawIndexed;
+ CmdDrawIndirect_t CmdDrawIndirect;
+ CmdDrawIndexedIndirect_t CmdDrawIndexedIndirect;
+ CmdDispatch_t CmdDispatch;
+ CmdDispatchIndirect_t CmdDispatchIndirect;
+ CmdCopyMemory_t CmdCopyMemory;
+ CmdCopyImage_t CmdCopyImage;
+ CmdCopyMemoryToImage_t CmdCopyMemoryToImage;
+ CmdCopyImageToMemory_t CmdCopyImageToMemory;
+ CmdCloneImageData_t CmdCloneImageData;
+ CmdUpdateMemory_t CmdUpdateMemory;
+ CmdFillMemory_t CmdFillMemory;
+ CmdClearColorImage_t CmdClearColorImage;
+ CmdClearColorImageRaw_t CmdClearColorImageRaw;
+ CmdClearDepthStencil_t CmdClearDepthStencil;
+ CmdResolveImage_t CmdResolveImage;
+ CmdSetEvent_t CmdSetEvent;
+ CmdResetEvent_t CmdResetEvent;
+ CmdMemoryAtomic_t CmdMemoryAtomic;
+ CmdBeginQuery_t CmdBeginQuery;
+ CmdEndQuery_t CmdEndQuery;
+ CmdResetQueryPool_t CmdResetQueryPool;
+ CmdWriteTimestamp_t CmdWriteTimestamp;
+ CmdInitAtomicCounters_t CmdInitAtomicCounters;
+ CmdLoadAtomicCounters_t CmdLoadAtomicCounters;
+ CmdSaveAtomicCounters_t CmdSaveAtomicCounters;
+};
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif /* XGLAPI_INCLUDED */
diff --git a/main/xgldriver.c b/main/xgldriver.c
new file mode 100644
index 0000000..78d8130
--- /dev/null
+++ b/main/xgldriver.c
@@ -0,0 +1,711 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * Copyright 2009-2010 Chia-I Wu <olvaffe@gmail.com>
+ * Copyright 2010-2011 LunarG, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Functions for choosing and opening/loading device drivers.
+ */
+
+
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "eglstring.h"
+#include "egldefines.h"
+#include "egldisplay.h"
+#include "egldriver.h"
+#include "egllog.h"
+#include "eglmutex.h"
+
+#if defined(_EGL_OS_UNIX)
+#include <dlfcn.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <unistd.h>
+#endif
+
+
/* Bookkeeping for one driver module, whether built in or loaded from disk. */
typedef struct _egl_module {
   char *Path;          /* module name or filesystem path (heap-allocated) */
   _EGLMain_t BuiltIn;  /* non-NULL for built-in drivers: the entry point */
   void *Handle;        /* dlopen/LoadLibrary handle; NULL for built-ins */
   _EGLDriver *Driver;  /* driver object; NULL until the module is loaded */
} _EGLModule;
+
+static _EGLMutex _eglModuleMutex = _EGL_MUTEX_INITIALIZER;
+static _EGLArray *_eglModules;
+
+const struct {
+ const char *name;
+ _EGLMain_t main;
+} _eglBuiltInDrivers[] = {
+#ifdef _EGL_BUILT_IN_DRIVER_GALLIUM
+ { "egl_gallium", _eglBuiltInDriverGALLIUM },
+#endif
+#ifdef _EGL_BUILT_IN_DRIVER_DRI2
+ { "egl_dri2", _eglBuiltInDriverDRI2 },
+#endif
+ { NULL, NULL }
+};
+
/**
 * Wrappers for dlopen/dlclose()
 *
 * Both platform branches expose the same tiny API: lib_handle,
 * open_library(), close_library(), and library_suffix().
 */
#if defined(_EGL_OS_WINDOWS)


typedef HMODULE lib_handle;

/* Open a DLL by filename; returns NULL on failure. */
static HMODULE
open_library(const char *filename)
{
   return LoadLibrary(filename);
}

static void
close_library(HMODULE lib)
{
   FreeLibrary(lib);
}


/* File-name suffix appended to driver names on this platform. */
static const char *
library_suffix(void)
{
   return ".dll";
}


#elif defined(_EGL_OS_UNIX)


typedef void * lib_handle;

/* Open a shared object; RTLD_LAZY defers symbol resolution to first use. */
static void *
open_library(const char *filename)
{
   return dlopen(filename, RTLD_LAZY);
}

static void
close_library(void *lib)
{
   dlclose(lib);
}


/* File-name suffix appended to driver names on this platform. */
static const char *
library_suffix(void)
{
   return ".so";
}


#endif
+
+
+/**
+ * Open the named driver and find its bootstrap function: _eglMain().
+ */
+static _EGLMain_t
+_eglOpenLibrary(const char *driverPath, lib_handle *handle)
+{
+ lib_handle lib;
+ _EGLMain_t mainFunc = NULL;
+ const char *error = "unknown error";
+
+ assert(driverPath);
+
+ _eglLog(_EGL_DEBUG, "dlopen(%s)", driverPath);
+ lib = open_library(driverPath);
+
+#if defined(_EGL_OS_WINDOWS)
+ /* XXX untested */
+ if (lib)
+ mainFunc = (_EGLMain_t) GetProcAddress(lib, "_eglMain");
+#elif defined(_EGL_OS_UNIX)
+ if (lib) {
+ union {
+ _EGLMain_t func;
+ void *ptr;
+ } tmp = { NULL };
+ /* direct cast gives a warning when compiled with -pedantic */
+ tmp.ptr = dlsym(lib, "_eglMain");
+ mainFunc = tmp.func;
+ if (!mainFunc)
+ error = dlerror();
+ }
+ else {
+ error = dlerror();
+ }
+#endif
+
+ if (!lib) {
+ _eglLog(_EGL_WARNING, "Could not open driver %s (%s)",
+ driverPath, error);
+ return NULL;
+ }
+
+ if (!mainFunc) {
+ _eglLog(_EGL_WARNING, "_eglMain not found in %s (%s)",
+ driverPath, error);
+ if (lib)
+ close_library(lib);
+ return NULL;
+ }
+
+ *handle = lib;
+ return mainFunc;
+}
+
+
+/**
+ * Load a module and create the driver object.
+ */
+static EGLBoolean
+_eglLoadModule(_EGLModule *mod)
+{
+ _EGLMain_t mainFunc;
+ lib_handle lib;
+ _EGLDriver *drv;
+
+ if (mod->Driver)
+ return EGL_TRUE;
+
+ if (mod->BuiltIn) {
+ lib = (lib_handle) NULL;
+ mainFunc = mod->BuiltIn;
+ }
+ else {
+ mainFunc = _eglOpenLibrary(mod->Path, &lib);
+ if (!mainFunc)
+ return EGL_FALSE;
+ }
+
+ drv = mainFunc(NULL);
+ if (!drv) {
+ if (lib)
+ close_library(lib);
+ return EGL_FALSE;
+ }
+
+ if (!drv->Name) {
+ _eglLog(_EGL_WARNING, "Driver loaded from %s has no name", mod->Path);
+ drv->Name = "UNNAMED";
+ }
+
+ mod->Handle = (void *) lib;
+ mod->Driver = drv;
+
+ return EGL_TRUE;
+}
+
+
/**
 * Unload a module: destroy its driver object and forget its library handle.
 * The handle itself is deliberately not closed (see the XXX note below).
 */
static void
_eglUnloadModule(_EGLModule *mod)
{
#if defined(_EGL_OS_UNIX)
   /* destroy the driver */
   if (mod->Driver && mod->Driver->Unload)
      mod->Driver->Unload(mod->Driver);

   /*
    * XXX At this point (atexit), the module might be the last reference to
    * libEGL. Closing the module might unmap libEGL and give problems.
    */
#if 0
   if (mod->Handle)
      close_library(mod->Handle);
#endif
#elif defined(_EGL_OS_WINDOWS)
   /* XXX Windows unloads DLLs before atexit */
#endif

   mod->Driver = NULL;
   mod->Handle = NULL;
}
+
+
+/**
+ * Add a module to the module array.
+ */
+static _EGLModule *
+_eglAddModule(const char *path)
+{
+ _EGLModule *mod;
+ EGLint i;
+
+ if (!_eglModules) {
+ _eglModules = _eglCreateArray("Module", 8);
+ if (!_eglModules)
+ return NULL;
+ }
+
+ /* find duplicates */
+ for (i = 0; i < _eglModules->Size; i++) {
+ mod = _eglModules->Elements[i];
+ if (strcmp(mod->Path, path) == 0)
+ return mod;
+ }
+
+ /* allocate a new one */
+ mod = calloc(1, sizeof(*mod));
+ if (mod) {
+ mod->Path = _eglstrdup(path);
+ if (!mod->Path) {
+ free(mod);
+ mod = NULL;
+ }
+ }
+ if (mod) {
+ _eglAppendArray(_eglModules, (void *) mod);
+ _eglLog(_EGL_DEBUG, "added %s to module array", mod->Path);
+ }
+
+ return mod;
+}
+
+
/**
 * Free a module: unload its driver, then release the path string and the
 * module struct itself.  Used as the destructor callback for
 * _eglDestroyArray() and _eglEraseArray(), hence the void* signature.
 */
static void
_eglFreeModule(void *module)
{
   _EGLModule *mod = (_EGLModule *) module;

   _eglUnloadModule(mod);
   free(mod->Path);
   free(mod);
}
+
+
/**
 * A loader function for use with _eglPreloadForEach.  The loader data is
 * the filename of the driver.  The candidate file is added to the module
 * array when it exists.  Note: this always returns EGL_TRUE, so iteration
 * continues over every directory in the search path (the old claim that it
 * "stops on the first valid driver" did not match the code).
 */
static EGLBoolean
_eglLoaderFile(const char *dir, size_t len, void *loader_data)
{
   char path[1024];
   const char *filename = (const char *) loader_data;
   size_t flen = strlen(filename);

   /* make a full path; silently skip this directory on overflow */
   if (len + flen + 2 > sizeof(path))
      return EGL_TRUE;
   if (len) {
      memcpy(path, dir, len);
      path[len++] = '/';
   }
   memcpy(path + len, filename, flen);
   len += flen;
   path[len] = '\0';

   if (library_suffix()) {
      const char *suffix = library_suffix();
      size_t slen = strlen(suffix);
      const char *p;
      EGLBoolean need_suffix;

      /* append the platform suffix unless the filename already ends in it
       * (p < filename guards the case where filename is shorter than it) */
      p = filename + flen - slen;
      need_suffix = (p < filename || strcmp(p, suffix) != 0);
      if (need_suffix) {
         /* overflow */
         if (len + slen + 1 > sizeof(path))
            return EGL_TRUE;
         strcpy(path + len, suffix);
      }
   }

#if defined(_EGL_OS_UNIX)
   /* check if the file exists */
   if (access(path, F_OK))
      return EGL_TRUE;
#endif

   _eglAddModule(path);

   return EGL_TRUE;
}
+
+
+/**
+ * Run the callback function on each driver directory.
+ *
+ * The process may end prematurely if the callback function returns false.
+ */
+static void
+_eglPreloadForEach(const char *search_path,
+ EGLBoolean (*loader)(const char *, size_t, void *),
+ void *loader_data)
+{
+ const char *cur, *next;
+ size_t len;
+
+ cur = search_path;
+ while (cur) {
+ next = strchr(cur, ':');
+ len = (next) ? next - cur : strlen(cur);
+
+ if (!loader(cur, len, loader_data))
+ break;
+
+ cur = (next) ? next + 1 : NULL;
+ }
+}
+
+
/**
 * Return a list of colon-separated driver directories.
 *
 * The list is built once into a static buffer and reused by later calls.
 * Sources, in priority order: the dirname of $EGL_DRIVER (when it contains
 * a directory separator), $EGL_DRIVERS_PATH, and finally the compiled-in
 * _EGL_DRIVER_SEARCH_DIR.  Environment variables are ignored for
 * setuid/setgid processes.
 * NOTE(review): the one-time fill of the static buffer is not guarded here;
 * callers in the matching path hold _eglModuleMutex, but
 * _eglSearchPathForEach() does not — confirm this race is acceptable.
 */
static const char *
_eglGetSearchPath(void)
{
   static char search_path[1024];

#if defined(_EGL_OS_UNIX) || defined(_EGL_OS_WINDOWS)
   if (search_path[0] == '\0') {
      char *buf = search_path;
      size_t len = sizeof(search_path);
      EGLBoolean use_env;
      char dir_sep;
      int ret;

#if defined(_EGL_OS_UNIX)
      /* do not honor the environment when effective ids differ (setuid) */
      use_env = (geteuid() == getuid() && getegid() == getgid());
      dir_sep = '/';
#else
      use_env = EGL_TRUE;
      dir_sep = '\\';
#endif

      if (use_env) {
         char *p;

         /* extract the dirname from EGL_DRIVER */
         p = getenv("EGL_DRIVER");
         if (p && strchr(p, dir_sep)) {
            ret = _eglsnprintf(buf, len, "%s", p);
            if (ret > 0 && ret < len) {
               /* overwrite the last separator with ':' so the basename
                * is dropped and the next source appends after it */
               p = strrchr(buf, dir_sep);
               *p++ = ':';

               len -= p - buf;
               buf = p;
            }
         }

         /* append EGL_DRIVERS_PATH */
         p = getenv("EGL_DRIVERS_PATH");
         if (p) {
            ret = _eglsnprintf(buf, len, "%s:", p);
            if (ret > 0 && ret < len) {
               buf += ret;
               len -= ret;
            }
         }
      }
      else {
         _eglLog(_EGL_DEBUG,
                 "ignore EGL_DRIVERS_PATH for setuid/setgid binaries");
      }

      /* the default search directory always terminates the list; on
       * overflow the whole path is reset to empty */
      ret = _eglsnprintf(buf, len, "%s", _EGL_DRIVER_SEARCH_DIR);
      if (ret < 0 || ret >= len)
         search_path[0] = '\0';

      _eglLog(_EGL_DEBUG, "EGL search path is %s", search_path);
   }
#endif /* defined(_EGL_OS_UNIX) || defined(_EGL_OS_WINDOWS) */

   return search_path;
}
+
+
/**
 * Add the user driver to the module array.
 *
 * The user driver is specified by EGL_DRIVER.  Returns EGL_TRUE when a
 * user driver was requested (whether or not it could be added), EGL_FALSE
 * when EGL_DRIVER is unset so the caller may fall back to the defaults.
 */
static EGLBoolean
_eglAddUserDriver(void)
{
   const char *search_path = _eglGetSearchPath();
   char *env;
   size_t name_len = 0;

   env = getenv("EGL_DRIVER");
#if defined(_EGL_OS_UNIX)
   if (env && strchr(env, '/')) {
      /* an explicit path: do not search directories for it */
      search_path = "";
      if ((geteuid() != getuid() || getegid() != getgid())) {
         _eglLog(_EGL_DEBUG,
                 "ignore EGL_DRIVER for setuid/setgid binaries");
         env = NULL;
      }
   }
   else if (env) {
      /* driver name without any filename suffix, e.g. "egl_gallium" */
      char *suffix = strchr(env, '.');
      name_len = (suffix) ? suffix - env : strlen(env);
   }
#else
   if (env)
      name_len = strlen(env);
#endif /* _EGL_OS_UNIX */

   /*
    * Try built-in drivers first if we know the driver name. This makes sure
    * we do not load the outdated external driver that is still on the
    * filesystem.
    */
   if (name_len) {
      _EGLModule *mod;
      EGLint i;

      for (i = 0; _eglBuiltInDrivers[i].name; i++) {
         if (strlen(_eglBuiltInDrivers[i].name) == name_len &&
             !strncmp(_eglBuiltInDrivers[i].name, env, name_len)) {
            mod = _eglAddModule(env);
            if (mod)
               mod->BuiltIn = _eglBuiltInDrivers[i].main;

            return EGL_TRUE;
         }
      }
   }

   /* otherwise, treat env as a path */
   if (env) {
      _eglPreloadForEach(search_path, _eglLoaderFile, (void *) env);

      return EGL_TRUE;
   }

   return EGL_FALSE;
}
+
+
/**
 * Add egl_gallium to the module array.
 *
 * This only does anything when egl_gallium is NOT compiled in; the
 * built-in variant is added by _eglAddBuiltInDrivers() instead.
 */
static void
_eglAddGalliumDriver(void)
{
#ifndef _EGL_BUILT_IN_DRIVER_GALLIUM
   void *external = (void *) "egl_gallium";
   _eglPreloadForEach(_eglGetSearchPath(), _eglLoaderFile, external);
#endif
}
+
+
+/**
+ * Add built-in drivers to the module array.
+ */
+static void
+_eglAddBuiltInDrivers(void)
+{
+ _EGLModule *mod;
+ EGLint i;
+
+ for (i = 0; _eglBuiltInDrivers[i].name; i++) {
+ mod = _eglAddModule(_eglBuiltInDrivers[i].name);
+ if (mod)
+ mod->BuiltIn = _eglBuiltInDrivers[i].main;
+ }
+}
+
+
+/**
+ * Add drivers to the module array. Drivers will be loaded as they are matched
+ * to displays.
+ */
+static EGLBoolean
+_eglAddDrivers(void)
+{
+ if (_eglModules)
+ return EGL_TRUE;
+
+ if (!_eglAddUserDriver()) {
+ /*
+ * Add other drivers only when EGL_DRIVER is not set. The order here
+ * decides the priorities.
+ */
+ _eglAddGalliumDriver();
+ _eglAddBuiltInDrivers();
+ }
+
+ return (_eglModules != NULL);
+}
+
+
/**
 * A helper function for _eglMatchDriver. It finds the first driver that can
 * initialize the display and returns it, or NULL when none can.
 *
 * Caller holds _eglModuleMutex (see _eglMatchDriver).
 */
static _EGLDriver *
_eglMatchAndInitialize(_EGLDisplay *dpy)
{
   _EGLDriver *drv = NULL;
   EGLint i = 0;

   if (!_eglAddDrivers()) {
      _eglLog(_EGL_WARNING, "failed to find any driver");
      return NULL;
   }

   /* a display that already has a driver is only re-initialized */
   if (dpy->Driver) {
      drv = dpy->Driver;
      /* no re-matching? */
      if (!drv->API.Initialize(drv, dpy))
         drv = NULL;
      return drv;
   }

   while (i < _eglModules->Size) {
      _EGLModule *mod = (_EGLModule *) _eglModules->Elements[i];

      if (!_eglLoadModule(mod)) {
         /* remove invalid modules; i is NOT advanced because erasing
          * shifts the remaining elements down onto index i */
         _eglEraseArray(_eglModules, i, _eglFreeModule);
         continue;
      }

      if (mod->Driver->API.Initialize(mod->Driver, dpy)) {
         drv = mod->Driver;
         break;
      }
      else {
         i++;
      }
   }

   return drv;
}
+
+
+/**
+ * Match a display to a driver. The display is initialized unless test_only is
+ * true. The matching is done by finding the first driver that can initialize
+ * the display.
+ */
+_EGLDriver *
+_eglMatchDriver(_EGLDisplay *dpy, EGLBoolean test_only)
+{
+ _EGLDriver *best_drv;
+
+ assert(!dpy->Initialized);
+
+ _eglLockMutex(&_eglModuleMutex);
+
+ /* set options */
+ dpy->Options.TestOnly = test_only;
+ dpy->Options.UseFallback = EGL_FALSE;
+
+ best_drv = _eglMatchAndInitialize(dpy);
+ if (!best_drv) {
+ dpy->Options.UseFallback = EGL_TRUE;
+ best_drv = _eglMatchAndInitialize(dpy);
+ }
+
+ _eglUnlockMutex(&_eglModuleMutex);
+
+ if (best_drv) {
+ _eglLog(_EGL_DEBUG, "the best driver is %s%s",
+ best_drv->Name, (test_only) ? " (test only) " : "");
+ if (!test_only) {
+ dpy->Driver = best_drv;
+ dpy->Initialized = EGL_TRUE;
+ }
+ }
+
+ return best_drv;
+}
+
+
/**
 * Look up a driver-provided extension function by name, asking each loaded
 * module in order.  Returns NULL when no driver provides it.
 *
 * When no module has been set up yet, the driver for the default display is
 * matched first (test-only, so the display itself stays uninitialized).
 * NOTE(review): the scan breaks at the first module without a loaded
 * driver — presumably because modules load front-to-back so the rest are
 * unloaded too; confirm later modules can never be loaded out of order.
 */
__eglMustCastToProperFunctionPointerType
_eglGetDriverProc(const char *procname)
{
   EGLint i;
   _EGLProc proc = NULL;

   if (!_eglModules) {
      /* load the driver for the default display */
      EGLDisplay egldpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
      _EGLDisplay *dpy = _eglLookupDisplay(egldpy);
      if (!dpy || !_eglMatchDriver(dpy, EGL_TRUE))
         return NULL;
   }

   for (i = 0; i < _eglModules->Size; i++) {
      _EGLModule *mod = (_EGLModule *) _eglModules->Elements[i];

      if (!mod->Driver)
         break;
      proc = mod->Driver->API.GetProcAddress(mod->Driver, procname);
      if (proc)
         break;
   }

   return proc;
}
+
+
/**
 * Unload all drivers and destroy the module array.
 */
void
_eglUnloadDrivers(void)
{
   /* this is called at atexit time */
   if (_eglModules) {
      /* _eglFreeModule unloads each driver before freeing its module */
      _eglDestroyArray(_eglModules, _eglFreeModule);
      _eglModules = NULL;
   }
}
+
+
+/**
+ * Invoke a callback function on each EGL search path.
+ *
+ * The first argument of the callback function is the name of the search path.
+ * The second argument is the length of the name.
+ */
+void
+_eglSearchPathForEach(EGLBoolean (*callback)(const char *, size_t, void *),
+ void *callback_data)
+{
+ const char *search_path = _eglGetSearchPath();
+ _eglPreloadForEach(search_path, callback, callback_data);
+}
diff --git a/main/xgldriver.h b/main/xgldriver.h
new file mode 100644
index 0000000..e34f19f
--- /dev/null
+++ b/main/xgldriver.h
@@ -0,0 +1,125 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * Copyright 2009-2010 Chia-I Wu <olvaffe@gmail.com>
+ * Copyright 2010-2011 LunarG, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
#ifndef EGLDRIVER_INCLUDED
#define EGLDRIVER_INCLUDED

/*
 * NOTE(review): this header was added as main/xgldriver.h but keeps the
 * EGLDRIVER_INCLUDED guard and includes the egl*-named headers below —
 * confirm those headers still exist under these names after the xgl rename.
 */

#include "egltypedefs.h"
#include "eglapi.h"
#include <stddef.h>

/**
 * Define an inline driver typecast function.
 *
 * Note that this macro defines a function and should not be ended with a
 * semicolon when used.
 */
#define _EGL_DRIVER_TYPECAST(drvtype, egltype, code) \
   static INLINE struct drvtype *drvtype(const egltype *obj) \
   { return (struct drvtype *) code; }


/**
 * Define the driver typecast functions for _EGLDriver, _EGLDisplay,
 * _EGLContext, _EGLSurface, and _EGLConfig.
 *
 * Note that this macro defines several functions and should not be ended with
 * a semicolon when used.
 */
#define _EGL_DRIVER_STANDARD_TYPECASTS(drvname) \
   _EGL_DRIVER_TYPECAST(drvname ## _driver, _EGLDriver, obj) \
   /* note that this is not a direct cast */ \
   _EGL_DRIVER_TYPECAST(drvname ## _display, _EGLDisplay, obj->DriverData) \
   _EGL_DRIVER_TYPECAST(drvname ## _context, _EGLContext, obj) \
   _EGL_DRIVER_TYPECAST(drvname ## _surface, _EGLSurface, obj) \
   _EGL_DRIVER_TYPECAST(drvname ## _config, _EGLConfig, obj)


/* Driver bootstrap entry point; resolved via dlsym("_eglMain") for
 * external modules, or taken from the built-in driver table. */
typedef _EGLDriver *(*_EGLMain_t)(const char *args);


/**
 * Base class for device drivers.
 */
struct _egl_driver
{
   const char *Name; /**< name of this driver */

   /**
    * Release the driver resource.
    *
    * It is called before dlclose().
    */
   void (*Unload)(_EGLDriver *drv);

   _EGLAPI API; /**< EGL API dispatch table */
};


/* Entry points of drivers that may be compiled into the library. */
extern _EGLDriver *
_eglBuiltInDriverGALLIUM(const char *args);


extern _EGLDriver *
_eglBuiltInDriverDRI2(const char *args);


extern _EGLDriver *
_eglBuiltInDriverGLX(const char *args);


/* Bootstrap entry point exported by external driver modules. */
PUBLIC _EGLDriver *
_eglMain(const char *args);


/* Find (and unless test_only, initialize) the driver for a display. */
extern _EGLDriver *
_eglMatchDriver(_EGLDisplay *dpy, EGLBoolean test_only);


/* Look up a driver-provided extension function by name. */
extern __eglMustCastToProperFunctionPointerType
_eglGetDriverProc(const char *procname);


/* Unload all drivers; intended for atexit-time cleanup. */
extern void
_eglUnloadDrivers(void);


/* defined in eglfallbacks.c */
PUBLIC void
_eglInitDriverFallbacks(_EGLDriver *drv);


/* Invoke callback on each colon-separated EGL search-path entry. */
PUBLIC void
_eglSearchPathForEach(EGLBoolean (*callback)(const char *, size_t, void *),
                      void *callback_data);


#endif /* EGLDRIVER_INCLUDED */
diff --git a/main/xgltypedefs.h b/main/xgltypedefs.h
new file mode 100644
index 0000000..e90959a
--- /dev/null
+++ b/main/xgltypedefs.h
@@ -0,0 +1,71 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * Copyright 2009-2010 Chia-I Wu <olvaffe@gmail.com>
+ * Copyright 2010 LunarG, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
#ifndef EGLTYPEDEFS_INCLUDED
#define EGLTYPEDEFS_INCLUDED

/*
 * Forward typedefs for the internal EGL object structs, so other headers
 * can reference the types without pulling in each struct definition.
 * NOTE(review): this file was added as main/xgltypedefs.h yet keeps the
 * EGLTYPEDEFS_INCLUDED guard and includes "eglcompiler.h" — confirm the
 * naming is intentional after the xgl rename.
 */

/* expose extension prototypes from <EGL/eglext.h> */
#define EGL_EGLEXT_PROTOTYPES

#include <EGL/egl.h>
#include <EGL/eglext.h>

#include "eglcompiler.h"

typedef struct _egl_api _EGLAPI;

typedef struct _egl_array _EGLArray;

typedef struct _egl_config _EGLConfig;

typedef struct _egl_context _EGLContext;

typedef struct _egl_display _EGLDisplay;

typedef struct _egl_driver _EGLDriver;

typedef struct _egl_extensions _EGLExtensions;

typedef struct _egl_image _EGLImage;

typedef struct _egl_image_attribs _EGLImageAttribs;

typedef struct _egl_mode _EGLMode;

typedef struct _egl_resource _EGLResource;

typedef struct _egl_screen _EGLScreen;

typedef struct _egl_surface _EGLSurface;

typedef struct _egl_sync _EGLSync;

typedef struct _egl_thread_info _EGLThreadInfo;

#endif /* EGLTYPEDEFS_INCLUDED */