Stage 1 of the XGL -> Vulkan rename

TODO: re-enable glave build, advance API for glave

v2: remove outdated code in tri introduced by the rebase
    rename wsi_null.c (olv)
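
The rename is mechanical: the XGL_ prefix on types and enums becomes VK_,
the xgl prefix on entry points becomes vk, and XGLAPI becomes VKAPI. As an
illustration only (no such header exists in this tree), a hypothetical
compatibility shim for unconverted call sites would reduce to one-line
aliases built from the name pairs visible in the diff below:

    /* Hypothetical compatibility shim; it only illustrates the mapping
     * and is not part of this commit. */
    #define XGL_SUCCESS              VK_SUCCESS
    #define XGL_ERROR_OUT_OF_MEMORY  VK_ERROR_OUT_OF_MEMORY
    #define XGLAPI                   VKAPI
    #define xglCreateBuffer          vkCreateBuffer
    #define xglBeginCommandBuffer    vkBeginCommandBuffer
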
diff --git a/icd/intel/CMakeLists.txt b/icd/intel/CMakeLists.txt
index 9cba46a..59118ba 100644
--- a/icd/intel/CMakeLists.txt
+++ b/icd/intel/CMakeLists.txt
@@ -1,6 +1,6 @@
 # Create the i965 XGL DRI library
 
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DXGL_PROTOTYPES -Wno-sign-compare")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DVK_PROTOTYPES -Wno-sign-compare")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-compare")
 
 add_subdirectory(kmd)
@@ -60,12 +60,12 @@
     list(APPEND sources wsi_null.c)
 endif()
 
-add_library(XGL_i965 SHARED ${sources})
-target_compile_definitions(XGL_i965 PRIVATE ${definitions})
-target_include_directories(XGL_i965 PRIVATE ${include_dirs})
-target_link_libraries(XGL_i965 ${libraries})
+add_library(VK_i965 SHARED ${sources})
+target_compile_definitions(VK_i965 PRIVATE ${definitions})
+target_include_directories(VK_i965 PRIVATE ${include_dirs})
+target_link_libraries(VK_i965 ${libraries})
 
 # set -Bsymbolic for xglGetProcAddr()
-set_target_properties(XGL_i965 PROPERTIES
+set_target_properties(VK_i965 PROPERTIES
     COMPILE_FLAGS "-Wmissing-declarations"
     LINK_FLAGS "-Wl,-Bsymbolic -Wl,-no-undefined -Wl,--exclude-libs,ALL")
diff --git a/icd/intel/buf.c b/icd/intel/buf.c
index 339fdad..55d1dad 100644
--- a/icd/intel/buf.c
+++ b/icd/intel/buf.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -36,18 +36,18 @@
     intel_buf_destroy(buf);
 }
 
-static XGL_RESULT buf_get_info(struct intel_base *base, int type,
+static VK_RESULT buf_get_info(struct intel_base *base, int type,
                                size_t *size, void *data)
 {
     struct intel_buf *buf = intel_buf_from_base(base);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (type) {
-    case XGL_INFO_TYPE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
         {
-            XGL_MEMORY_REQUIREMENTS *mem_req = data;
+            VK_MEMORY_REQUIREMENTS *mem_req = data;
 
-            *size = sizeof(XGL_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
 
@@ -60,19 +60,19 @@
              *      bytes added beyond that to account for the L1 cache line."
              */
             mem_req->size = buf->size;
-            if (buf->usage & XGL_BUFFER_USAGE_SHADER_ACCESS_READ_BIT)
+            if (buf->usage & VK_BUFFER_USAGE_SHADER_ACCESS_READ_BIT)
                 mem_req->size = u_align(mem_req->size, 256) + 16;
 
             mem_req->alignment = 4096;
-            mem_req->memType = XGL_MEMORY_TYPE_BUFFER;
+            mem_req->memType = VK_MEMORY_TYPE_BUFFER;
 
         }
         break;
-        case XGL_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
+        case VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
         {
-            XGL_BUFFER_MEMORY_REQUIREMENTS *buf_req = data;
+            VK_BUFFER_MEMORY_REQUIREMENTS *buf_req = data;
 
-            *size = sizeof(XGL_BUFFER_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
             buf_req->usage = buf->usage;
@@ -86,16 +86,16 @@
     return ret;
 }
 
-XGL_RESULT intel_buf_create(struct intel_dev *dev,
-                            const XGL_BUFFER_CREATE_INFO *info,
+VK_RESULT intel_buf_create(struct intel_dev *dev,
+                            const VK_BUFFER_CREATE_INFO *info,
                             struct intel_buf **buf_ret)
 {
     struct intel_buf *buf;
 
     buf = (struct intel_buf *) intel_base_create(&dev->base.handle,
-            sizeof(*buf), dev->base.dbg, XGL_DBG_OBJECT_BUFFER, info, 0);
+            sizeof(*buf), dev->base.dbg, VK_DBG_OBJECT_BUFFER, info, 0);
     if (!buf)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     buf->size = info->size;
     buf->usage = info->usage;
@@ -105,7 +105,7 @@
 
     *buf_ret = buf;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_buf_destroy(struct intel_buf *buf)
@@ -113,10 +113,10 @@
     intel_base_destroy(&buf->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateBuffer(
-    XGL_DEVICE                                  device,
-    const XGL_BUFFER_CREATE_INFO*               pCreateInfo,
-    XGL_BUFFER*                                 pBuffer)
+ICD_EXPORT VK_RESULT VKAPI vkCreateBuffer(
+    VK_DEVICE                                  device,
+    const VK_BUFFER_CREATE_INFO*               pCreateInfo,
+    VK_BUFFER*                                 pBuffer)
 {
     struct intel_dev *dev = intel_dev(device);
 
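buf_get_info() above keeps the two-call idiom: a first call with data ==
NULL only reports the required size, and a second call fills the
structure. The shader-read padding rule can be read straight out of the
VK_INFO_TYPE_MEMORY_REQUIREMENTS case; a minimal sketch of the same
arithmetic, assuming the driver's u_align() align-up helper:

    /* Sketch of the size rule in buf_get_info(): shader-readable buffers
     * are padded to a 256-byte multiple plus 16 bytes for the L1 cache
     * line, per the comment in the case above. */
    static VK_GPU_SIZE buf_required_size(VK_GPU_SIZE size, VK_FLAGS usage)
    {
        if (usage & VK_BUFFER_USAGE_SHADER_ACCESS_READ_BIT)
            size = u_align(size, 256) + 16;
        return size;
    }
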
diff --git a/icd/intel/buf.h b/icd/intel/buf.h
index 9b5f08b..f570141 100644
--- a/icd/intel/buf.h
+++ b/icd/intel/buf.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -34,11 +34,11 @@
 struct intel_buf {
     struct intel_obj obj;
 
-    XGL_GPU_SIZE size;
-    XGL_FLAGS usage;
+    VK_GPU_SIZE size;
+    VK_FLAGS usage;
 };
 
-static inline struct intel_buf *intel_buf(XGL_BUFFER buf)
+static inline struct intel_buf *intel_buf(VK_BUFFER buf)
 {
     return (struct intel_buf *) buf;
 }
@@ -53,8 +53,8 @@
     return intel_buf_from_base(&obj->base);
 }
 
-XGL_RESULT intel_buf_create(struct intel_dev *dev,
-                            const XGL_BUFFER_CREATE_INFO *info,
+VK_RESULT intel_buf_create(struct intel_dev *dev,
+                            const VK_BUFFER_CREATE_INFO *info,
                             struct intel_buf **buf_ret);
 
 void intel_buf_destroy(struct intel_buf *buf);
diff --git a/icd/intel/cmd.c b/icd/intel/cmd.c
index 8edfd85..698f404 100644
--- a/icd/intel/cmd.c
+++ b/icd/intel/cmd.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -93,7 +93,7 @@
 /**
  * Allocate and map the buffer for writing.
  */
-static XGL_RESULT cmd_writer_alloc_and_map(struct intel_cmd *cmd,
+static VK_RESULT cmd_writer_alloc_and_map(struct intel_cmd *cmd,
                                            enum intel_cmd_writer_type which)
 {
     struct intel_cmd_writer *writer = &cmd->writers[which];
@@ -107,7 +107,7 @@
         /* reuse the old bo */
         cmd_writer_discard(cmd, which);
     } else {
-        return XGL_ERROR_OUT_OF_GPU_MEMORY;
+        return VK_ERROR_OUT_OF_GPU_MEMORY;
     }
 
     writer->used = 0;
@@ -115,9 +115,9 @@
 
     writer->ptr = intel_bo_map(writer->bo, true);
     if (!writer->ptr)
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 /**
@@ -152,7 +152,7 @@
     new_bo = alloc_writer_bo(cmd->dev->winsys, which, new_size);
     if (!new_bo) {
         cmd_writer_discard(cmd, which);
-        cmd_fail(cmd, XGL_ERROR_OUT_OF_GPU_MEMORY);
+        cmd_fail(cmd, VK_ERROR_OUT_OF_GPU_MEMORY);
         return;
     }
 
@@ -161,7 +161,7 @@
     if (!new_ptr) {
         intel_bo_unref(new_bo);
         cmd_writer_discard(cmd, which);
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -192,10 +192,10 @@
         struct intel_cmd_item *items;
 
         items = intel_alloc(cmd, sizeof(writer->items[0]) * new_alloc,
-                0, XGL_SYSTEM_ALLOC_DEBUG);
+                0, VK_SYSTEM_ALLOC_DEBUG);
         if (!items) {
             writer->item_used = 0;
-            cmd_fail(cmd, XGL_ERROR_OUT_OF_MEMORY);
+            cmd_fail(cmd, VK_ERROR_OUT_OF_MEMORY);
             return;
         }
 
@@ -246,7 +246,7 @@
     memset(&cmd->bind, 0, sizeof(cmd->bind));
 
     cmd->reloc_used = 0;
-    cmd->result = XGL_SUCCESS;
+    cmd->result = VK_SUCCESS;
 }
 
 static void cmd_destroy(struct intel_obj *obj)
@@ -256,8 +256,8 @@
     intel_cmd_destroy(cmd);
 }
 
-XGL_RESULT intel_cmd_create(struct intel_dev *dev,
-                            const XGL_CMD_BUFFER_CREATE_INFO *info,
+VK_RESULT intel_cmd_create(struct intel_dev *dev,
+                            const VK_CMD_BUFFER_CREATE_INFO *info,
                             struct intel_cmd **cmd_ret)
 {
     int pipeline_select;
@@ -268,14 +268,14 @@
         pipeline_select = GEN6_PIPELINE_SELECT_DW0_SELECT_3D;
         break;
     default:
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
         break;
     }
 
     cmd = (struct intel_cmd *) intel_base_create(&dev->base.handle,
-            sizeof(*cmd), dev->base.dbg, XGL_DBG_OBJECT_CMD_BUFFER, info, 0);
+            sizeof(*cmd), dev->base.dbg, VK_DBG_OBJECT_CMD_BUFFER, info, 0);
     if (!cmd)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     cmd->obj.destroy = cmd_destroy;
 
@@ -290,15 +290,15 @@
      */
     cmd->reloc_count = dev->gpu->batch_buffer_reloc_count;
     cmd->relocs = intel_alloc(cmd, sizeof(cmd->relocs[0]) * cmd->reloc_count,
-            4096, XGL_SYSTEM_ALLOC_INTERNAL);
+            4096, VK_SYSTEM_ALLOC_INTERNAL);
     if (!cmd->relocs) {
         intel_cmd_destroy(cmd);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     *cmd_ret = cmd;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_cmd_destroy(struct intel_cmd *cmd)
@@ -309,31 +309,31 @@
     intel_base_destroy(&cmd->obj.base);
 }
 
-XGL_RESULT intel_cmd_begin(struct intel_cmd *cmd, const XGL_CMD_BUFFER_BEGIN_INFO *info)
+VK_RESULT intel_cmd_begin(struct intel_cmd *cmd, const VK_CMD_BUFFER_BEGIN_INFO *info)
 {
-    const XGL_CMD_BUFFER_GRAPHICS_BEGIN_INFO *ginfo;
-    XGL_RESULT ret;
+    const VK_CMD_BUFFER_GRAPHICS_BEGIN_INFO *ginfo;
+    VK_RESULT ret;
     uint32_t i;
-    XGL_FLAGS flags = 0;
+    VK_FLAGS flags = 0;
 
     cmd_reset(cmd);
 
     while (info != NULL) {
         switch (info->sType) {
-        case XGL_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO:
+        case VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO:
             flags = info->flags;
             break;
-        case XGL_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO:
-            ginfo = (const XGL_CMD_BUFFER_GRAPHICS_BEGIN_INFO *) info;
+        case VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO:
+            ginfo = (const VK_CMD_BUFFER_GRAPHICS_BEGIN_INFO *) info;
             cmd_begin_render_pass(cmd, intel_render_pass(ginfo->renderPassContinue.renderPass),
                                   intel_fb(ginfo->renderPassContinue.framebuffer));
             break;
         default:
-            return XGL_ERROR_INVALID_VALUE;
+            return VK_ERROR_INVALID_VALUE;
             break;
         }
 
-        info = (const XGL_CMD_BUFFER_BEGIN_INFO*) info->pNext;
+        info = (const VK_CMD_BUFFER_BEGIN_INFO*) info->pNext;
     }
 
     if (cmd->flags != flags) {
@@ -345,7 +345,7 @@
         const uint32_t size = cmd->dev->gpu->max_batch_buffer_size / 2;
         uint32_t divider = 1;
 
-        if (flags & XGL_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT)
+        if (flags & VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT)
             divider *= 4;
 
         cmd->writers[INTEL_CMD_WRITER_BATCH].size = size / divider;
@@ -356,7 +356,7 @@
 
     for (i = 0; i < INTEL_CMD_WRITER_COUNT; i++) {
         ret = cmd_writer_alloc_and_map(cmd, i);
-        if (ret != XGL_SUCCESS) {
+        if (ret != VK_SUCCESS) {
             cmd_reset(cmd);
             return  ret;
         }
@@ -364,17 +364,17 @@
 
     cmd_batch_begin(cmd);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_cmd_end(struct intel_cmd *cmd)
+VK_RESULT intel_cmd_end(struct intel_cmd *cmd)
 {
     struct intel_winsys *winsys = cmd->dev->winsys;
     uint32_t i;
 
     /* no matching intel_cmd_begin() */
     if (!cmd->writers[INTEL_CMD_WRITER_BATCH].ptr)
-        return XGL_ERROR_INCOMPLETE_COMMAND_BUFFER;
+        return VK_ERROR_INCOMPLETE_COMMAND_BUFFER;
 
     cmd_batch_end(cmd);
 
@@ -398,7 +398,7 @@
                 (struct intel_bo *) reloc->target, reloc->target_offset,
                 reloc->flags, &presumed_offset);
         if (err) {
-            cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+            cmd_fail(cmd, VK_ERROR_UNKNOWN);
             break;
         }
 
@@ -420,7 +420,7 @@
                 reloc->flags & ~INTEL_CMD_RELOC_TARGET_IS_WRITER,
                 &presumed_offset);
         if (err) {
-            cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+            cmd_fail(cmd, VK_ERROR_UNKNOWN);
             break;
         }
 
@@ -432,20 +432,20 @@
     for (i = 0; i < INTEL_CMD_WRITER_COUNT; i++)
         cmd_writer_unmap(cmd, i);
 
-    if (cmd->result != XGL_SUCCESS)
+    if (cmd->result != VK_SUCCESS)
         return cmd->result;
 
     if (intel_winsys_can_submit_bo(winsys,
                 &cmd->writers[INTEL_CMD_WRITER_BATCH].bo, 1))
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
     else
-        return XGL_ERROR_TOO_MANY_MEMORY_REFERENCES;
+        return VK_ERROR_TOO_MANY_MEMORY_REFERENCES;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateCommandBuffer(
-    XGL_DEVICE                                  device,
-    const XGL_CMD_BUFFER_CREATE_INFO*           pCreateInfo,
-    XGL_CMD_BUFFER*                             pCmdBuffer)
+ICD_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(
+    VK_DEVICE                                  device,
+    const VK_CMD_BUFFER_CREATE_INFO*           pCreateInfo,
+    VK_CMD_BUFFER*                             pCmdBuffer)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -453,69 +453,69 @@
             (struct intel_cmd **) pCmdBuffer);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglBeginCommandBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    const XGL_CMD_BUFFER_BEGIN_INFO            *info)
+ICD_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    const VK_CMD_BUFFER_BEGIN_INFO            *info)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
     return intel_cmd_begin(cmd, info);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglEndCommandBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer)
+ICD_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(
+    VK_CMD_BUFFER                              cmdBuffer)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
     return intel_cmd_end(cmd);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglResetCommandBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer)
+ICD_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(
+    VK_CMD_BUFFER                              cmdBuffer)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
     cmd_reset(cmd);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT void XGLAPI xglCmdInitAtomicCounters(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_PIPELINE_BIND_POINT                     pipelineBindPoint,
+ICD_EXPORT void VKAPI vkCmdInitAtomicCounters(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_PIPELINE_BIND_POINT                     pipelineBindPoint,
     uint32_t                                    startCounter,
     uint32_t                                    counterCount,
     const uint32_t*                             pData)
 {
 }
 
-ICD_EXPORT void XGLAPI xglCmdLoadAtomicCounters(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_PIPELINE_BIND_POINT                     pipelineBindPoint,
+ICD_EXPORT void VKAPI vkCmdLoadAtomicCounters(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_PIPELINE_BIND_POINT                     pipelineBindPoint,
     uint32_t                                    startCounter,
     uint32_t                                    counterCount,
-    XGL_BUFFER                                  srcBuffer,
-    XGL_GPU_SIZE                                srcOffset)
+    VK_BUFFER                                  srcBuffer,
+    VK_GPU_SIZE                                srcOffset)
 {
 }
 
-ICD_EXPORT void XGLAPI xglCmdSaveAtomicCounters(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_PIPELINE_BIND_POINT                     pipelineBindPoint,
+ICD_EXPORT void VKAPI vkCmdSaveAtomicCounters(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_PIPELINE_BIND_POINT                     pipelineBindPoint,
     uint32_t                                    startCounter,
     uint32_t                                    counterCount,
-    XGL_BUFFER                                  destBuffer,
-    XGL_GPU_SIZE                                destOffset)
+    VK_BUFFER                                  destBuffer,
+    VK_GPU_SIZE                                destOffset)
 {
 }
 
-ICD_EXPORT void XGLAPI xglCmdDbgMarkerBegin(
-    XGL_CMD_BUFFER                              cmdBuffer,
+ICD_EXPORT void VKAPI vkCmdDbgMarkerBegin(
+    VK_CMD_BUFFER                              cmdBuffer,
     const char*                                 pMarker)
 {
 }
 
-ICD_EXPORT void XGLAPI xglCmdDbgMarkerEnd(
-    XGL_CMD_BUFFER                              cmdBuffer)
+ICD_EXPORT void VKAPI vkCmdDbgMarkerEnd(
+    VK_CMD_BUFFER                              cmdBuffer)
 {
 }
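
intel_cmd_begin() sizes the command writers from the device limit: the
batch writer starts at half of max_batch_buffer_size and shrinks by a
further factor of four when VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT is
set. The same arithmetic as a standalone sketch (max_batch stands in for
cmd->dev->gpu->max_batch_buffer_size):

    /* Sketch of the batch-writer sizing in intel_cmd_begin(). */
    static uint32_t batch_writer_size(uint32_t max_batch, VK_FLAGS flags)
    {
        const uint32_t size = max_batch / 2;
        uint32_t divider = 1;

        if (flags & VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT)
            divider *= 4;

        return size / divider;
    }
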
diff --git a/icd/intel/cmd.h b/icd/intel/cmd.h
index 2379b3a..a832ef5 100644
--- a/icd/intel/cmd.h
+++ b/icd/intel/cmd.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -179,13 +179,13 @@
 
     struct {
         const struct intel_buf *buf[INTEL_MAX_VERTEX_BINDING_COUNT];
-        XGL_GPU_SIZE offset[INTEL_MAX_VERTEX_BINDING_COUNT];
+        VK_GPU_SIZE offset[INTEL_MAX_VERTEX_BINDING_COUNT];
     } vertex;
 
     struct {
         const struct intel_buf *buf;
-        XGL_GPU_SIZE offset;
-        XGL_INDEX_TYPE type;
+        VK_GPU_SIZE offset;
+        VK_INDEX_TYPE type;
     } index;
 
 
@@ -222,17 +222,17 @@
     struct intel_cmd_reloc *relocs;
     uint32_t reloc_count;
 
-    XGL_FLAGS flags;
+    VK_FLAGS flags;
 
     struct intel_cmd_writer writers[INTEL_CMD_WRITER_COUNT];
 
     uint32_t reloc_used;
-    XGL_RESULT result;
+    VK_RESULT result;
 
     struct intel_cmd_bind bind;
 };
 
-static inline struct intel_cmd *intel_cmd(XGL_CMD_BUFFER cmd)
+static inline struct intel_cmd *intel_cmd(VK_CMD_BUFFER cmd)
 {
     return (struct intel_cmd *) cmd;
 }
@@ -242,18 +242,18 @@
     return (struct intel_cmd *) obj;
 }
 
-XGL_RESULT intel_cmd_create(struct intel_dev *dev,
-                            const XGL_CMD_BUFFER_CREATE_INFO *info,
+VK_RESULT intel_cmd_create(struct intel_dev *dev,
+                            const VK_CMD_BUFFER_CREATE_INFO *info,
                             struct intel_cmd **cmd_ret);
 void intel_cmd_destroy(struct intel_cmd *cmd);
 
-XGL_RESULT intel_cmd_begin(struct intel_cmd *cmd, const XGL_CMD_BUFFER_BEGIN_INFO* pBeginInfo);
-XGL_RESULT intel_cmd_end(struct intel_cmd *cmd);
+VK_RESULT intel_cmd_begin(struct intel_cmd *cmd, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo);
+VK_RESULT intel_cmd_end(struct intel_cmd *cmd);
 
 void intel_cmd_decode(struct intel_cmd *cmd, bool decode_inst_writer);
 
 static inline struct intel_bo *intel_cmd_get_batch(const struct intel_cmd *cmd,
-                                                   XGL_GPU_SIZE *used)
+                                                   VK_GPU_SIZE *used)
 {
     const struct intel_cmd_writer *writer =
         &cmd->writers[INTEL_CMD_WRITER_BATCH];
diff --git a/icd/intel/cmd_barrier.c b/icd/intel/cmd_barrier.c
index da46bca..f3868e7 100644
--- a/icd/intel/cmd_barrier.c
+++ b/icd/intel/cmd_barrier.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -45,36 +45,36 @@
 };
 
 static uint32_t img_get_layout_ops(const struct intel_img *img,
-                                   XGL_IMAGE_LAYOUT layout)
+                                   VK_IMAGE_LAYOUT layout)
 {
     uint32_t ops;
 
     switch (layout) {
-    case XGL_IMAGE_LAYOUT_GENERAL:
+    case VK_IMAGE_LAYOUT_GENERAL:
         ops = READ_OP | WRITE_OP;
         break;
-    case XGL_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
         ops = READ_OP | WRITE_OP;
         break;
-    case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
         ops = READ_OP | WRITE_OP | HIZ_OP;
         break;
-    case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
         ops = READ_OP | HIZ_OP;
         break;
-    case XGL_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
         ops = READ_OP;
         break;
-    case XGL_IMAGE_LAYOUT_CLEAR_OPTIMAL:
+    case VK_IMAGE_LAYOUT_CLEAR_OPTIMAL:
         ops = WRITE_OP | HIZ_OP;
         break;
-    case XGL_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
+    case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
         ops = READ_OP;
         break;
-    case XGL_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
+    case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
         ops = WRITE_OP;
         break;
-    case XGL_IMAGE_LAYOUT_UNDEFINED:
+    case VK_IMAGE_LAYOUT_UNDEFINED:
     default:
         ops = 0;
         break;
@@ -84,40 +84,40 @@
 }
 
 static uint32_t img_get_layout_caches(const struct intel_img *img,
-                                     XGL_IMAGE_LAYOUT layout)
+                                     VK_IMAGE_LAYOUT layout)
 {
     uint32_t caches;
 
     switch (layout) {
-    case XGL_IMAGE_LAYOUT_GENERAL:
+    case VK_IMAGE_LAYOUT_GENERAL:
         // General layout when image can be used for any kind of access
         caches = MEM_CACHE | DATA_READ_CACHE | DATA_WRITE_CACHE | RENDER_CACHE | SAMPLER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
         // Optimal layout when image is only used for color attachment read/write
         caches = DATA_WRITE_CACHE | RENDER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
         // Optimal layout when image is only used for depth/stencil attachment read/write
         caches = DATA_WRITE_CACHE | RENDER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
         // Optimal layout when image is used for read only depth/stencil attachment and shader access
         caches = RENDER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
         // Optimal layout when image is used for read only shader access
         caches = DATA_READ_CACHE | SAMPLER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_CLEAR_OPTIMAL:
+    case VK_IMAGE_LAYOUT_CLEAR_OPTIMAL:
         // Optimal layout when image is used only for clear operations
         caches = RENDER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
+    case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
         // Optimal layout when image is used only as source of transfer operations
         caches = MEM_CACHE | DATA_READ_CACHE | RENDER_CACHE | SAMPLER_CACHE;
         break;
-    case XGL_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
+    case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
         // Optimal layout when image is used only as destination of transfer operations
         caches = MEM_CACHE | DATA_WRITE_CACHE | RENDER_CACHE;
         break;
@@ -131,9 +131,9 @@
 
 static void cmd_resolve_depth(struct intel_cmd *cmd,
                               struct intel_img *img,
-                              XGL_IMAGE_LAYOUT old_layout,
-                              XGL_IMAGE_LAYOUT new_layout,
-                              const XGL_IMAGE_SUBRESOURCE_RANGE *range)
+                              VK_IMAGE_LAYOUT old_layout,
+                              VK_IMAGE_LAYOUT new_layout,
+                              const VK_IMAGE_SUBRESOURCE_RANGE *range)
 {
     const uint32_t old_ops = img_get_layout_ops(img, old_layout);
     const uint32_t new_ops = img_get_layout_ops(img, new_layout);
@@ -190,30 +190,30 @@
                                 const void** memory_barriers)
 {
     uint32_t i;
-    XGL_FLAGS input_mask = 0;
-    XGL_FLAGS output_mask = 0;
+    VK_FLAGS input_mask = 0;
+    VK_FLAGS output_mask = 0;
 
     for (i = 0; i < memory_barrier_count; i++) {
 
         const union {
-            XGL_STRUCTURE_TYPE type;
+            VK_STRUCTURE_TYPE type;
 
-            XGL_MEMORY_BARRIER mem;
-            XGL_BUFFER_MEMORY_BARRIER buf;
-            XGL_IMAGE_MEMORY_BARRIER img;
+            VK_MEMORY_BARRIER mem;
+            VK_BUFFER_MEMORY_BARRIER buf;
+            VK_IMAGE_MEMORY_BARRIER img;
         } *u = memory_barriers[i];
 
         switch(u->type)
         {
-        case XGL_STRUCTURE_TYPE_MEMORY_BARRIER:
+        case VK_STRUCTURE_TYPE_MEMORY_BARRIER:
             output_mask |= u->mem.outputMask;
             input_mask  |= u->mem.inputMask;
             break;
-        case XGL_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER:
+        case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER:
             output_mask |= u->buf.outputMask;
             input_mask  |= u->buf.inputMask;
             break;
-        case XGL_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER:
+        case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER:
             output_mask |= u->img.outputMask;
             input_mask  |= u->img.inputMask;
             {
@@ -233,58 +233,58 @@
         }
     }
 
-    if (output_mask & XGL_MEMORY_OUTPUT_SHADER_WRITE_BIT) {
+    if (output_mask & VK_MEMORY_OUTPUT_SHADER_WRITE_BIT) {
         flush_flags |= GEN7_PIPE_CONTROL_DC_FLUSH;
     }
-    if (output_mask & XGL_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT) {
+    if (output_mask & VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT) {
         flush_flags |= GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH;
     }
-    if (output_mask & XGL_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT) {
+    if (output_mask & VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT) {
         flush_flags |= GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH;
     }
 
-    /* CPU write is cache coherent, so XGL_MEMORY_OUTPUT_CPU_WRITE_BIT needs no flush. */
-    /* Meta handles flushes, so XGL_MEMORY_OUTPUT_COPY_BIT needs no flush. */
+    /* CPU write is cache coherent, so VK_MEMORY_OUTPUT_CPU_WRITE_BIT needs no flush. */
+    /* Meta handles flushes, so VK_MEMORY_OUTPUT_COPY_BIT needs no flush. */
 
-    if (input_mask & (XGL_MEMORY_INPUT_SHADER_READ_BIT | XGL_MEMORY_INPUT_UNIFORM_READ_BIT)) {
+    if (input_mask & (VK_MEMORY_INPUT_SHADER_READ_BIT | VK_MEMORY_INPUT_UNIFORM_READ_BIT)) {
         flush_flags |= GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
     }
 
-    if (input_mask & XGL_MEMORY_INPUT_UNIFORM_READ_BIT) {
+    if (input_mask & VK_MEMORY_INPUT_UNIFORM_READ_BIT) {
         flush_flags |= GEN6_PIPE_CONTROL_CONSTANT_CACHE_INVALIDATE;
     }
 
-    if (input_mask & XGL_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT) {
+    if (input_mask & VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT) {
         flush_flags |= GEN6_PIPE_CONTROL_VF_CACHE_INVALIDATE;
     }
 
     /* These bits have no corresponding cache invalidate operation.
-     * XGL_MEMORY_INPUT_CPU_READ_BIT
-     * XGL_MEMORY_INPUT_INDIRECT_COMMAND_BIT
-     * XGL_MEMORY_INPUT_INDEX_FETCH_BIT
-     * XGL_MEMORY_INPUT_COLOR_ATTACHMENT_BIT
-     * XGL_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT
-     * XGL_MEMORY_INPUT_COPY_BIT
+     * VK_MEMORY_INPUT_CPU_READ_BIT
+     * VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT
+     * VK_MEMORY_INPUT_INDEX_FETCH_BIT
+     * VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT
+     * VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT
+     * VK_MEMORY_INPUT_COPY_BIT
      */
 
     cmd_batch_flush(cmd, flush_flags);
 }
 
-ICD_EXPORT void XGLAPI xglCmdWaitEvents(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    const XGL_EVENT_WAIT_INFO*                  pWaitInfo)
+ICD_EXPORT void VKAPI vkCmdWaitEvents(
+    VK_CMD_BUFFER                              cmdBuffer,
+    const VK_EVENT_WAIT_INFO*                  pWaitInfo)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
-    /* This hardware will always wait at XGL_WAIT_EVENT_TOP_OF_PIPE.
-     * Passing a pWaitInfo->waitEvent of XGL_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
+    /* This hardware will always wait at VK_WAIT_EVENT_TOP_OF_PIPE.
+     * Passing a pWaitInfo->waitEvent of VK_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
      * does not change that.
      */
 
     /* Because the command buffer is serialized, reaching
      * a pipelined wait is always after completion of prior events.
      * pWaitInfo->pEvents need not be examined.
-     * xglCmdWaitEvents is equivalent to memory barrier part of xglCmdPipelineBarrier.
+     * vkCmdWaitEvents is equivalent to memory barrier part of vkCmdPipelineBarrier.
      * cmd_memory_barriers will wait for GEN6_PIPE_CONTROL_CS_STALL and perform
      * appropriate cache control.
      */
@@ -293,39 +293,39 @@
             pWaitInfo->memBarrierCount, pWaitInfo->ppMemBarriers);
 }
 
-ICD_EXPORT void XGLAPI xglCmdPipelineBarrier(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    const XGL_PIPELINE_BARRIER*                 pBarrier)
+ICD_EXPORT void VKAPI vkCmdPipelineBarrier(
+    VK_CMD_BUFFER                              cmdBuffer,
+    const VK_PIPELINE_BARRIER*                 pBarrier)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     uint32_t pipe_control_flags = 0;
     uint32_t i;
 
-    /* This hardware will always wait at XGL_WAIT_EVENT_TOP_OF_PIPE.
-     * Passing a pBarrier->waitEvent of XGL_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
+    /* This hardware will always wait at VK_WAIT_EVENT_TOP_OF_PIPE.
+     * Passing a pBarrier->waitEvent of VK_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
      * does not change that.
      */
 
     /* Cache control is done with PIPE_CONTROL flags.
-     * With no GEN6_PIPE_CONTROL_CS_STALL flag set, it behaves as XGL_PIPE_EVENT_TOP_OF_PIPE.
-     * All other pEvents values will behave as XGL_PIPE_EVENT_GPU_COMMANDS_COMPLETE.
+     * With no GEN6_PIPE_CONTROL_CS_STALL flag set, it behaves as VK_PIPE_EVENT_TOP_OF_PIPE.
+     * All other pEvents values will behave as VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE.
      */
     for (i = 0; i < pBarrier->eventCount; i++) {
         switch(pBarrier->pEvents[i])
         {
-        case XGL_PIPE_EVENT_TOP_OF_PIPE:
+        case VK_PIPE_EVENT_TOP_OF_PIPE:
             break;
-        case XGL_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE:
-        case XGL_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE:
-        case XGL_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE:
-        case XGL_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE:
-        case XGL_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE:
-        case XGL_PIPE_EVENT_TRANSFER_COMPLETE:
-        case XGL_PIPE_EVENT_GPU_COMMANDS_COMPLETE:
+        case VK_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE:
+        case VK_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE:
+        case VK_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE:
+        case VK_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE:
+        case VK_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE:
+        case VK_PIPE_EVENT_TRANSFER_COMPLETE:
+        case VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE:
             pipe_control_flags |= GEN6_PIPE_CONTROL_CS_STALL;
             break;
         default:
-            cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+            cmd_fail(cmd, VK_ERROR_UNKNOWN);
             return;
             break;
         }
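
The barrier path reduces to OR-ing the output masks into cache-flush bits
and the input masks into cache-invalidate bits, then issuing a single
PIPE_CONTROL via cmd_batch_flush(). A condensed sketch of that mapping,
using the GEN6/GEN7 constants from this file:

    /* Sketch of the mask-to-PIPE_CONTROL mapping in cmd_memory_barriers(). */
    static uint32_t barrier_pipe_control_flags(VK_FLAGS output_mask,
                                               VK_FLAGS input_mask)
    {
        uint32_t flags = 0;

        if (output_mask & VK_MEMORY_OUTPUT_SHADER_WRITE_BIT)
            flags |= GEN7_PIPE_CONTROL_DC_FLUSH;
        if (output_mask & VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT)
            flags |= GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH;
        if (output_mask & VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT)
            flags |= GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH;

        /* CPU writes are cache coherent and meta handles its own flushes,
         * so the CPU_WRITE and COPY output bits add nothing here. */

        if (input_mask & (VK_MEMORY_INPUT_SHADER_READ_BIT |
                          VK_MEMORY_INPUT_UNIFORM_READ_BIT))
            flags |= GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        if (input_mask & VK_MEMORY_INPUT_UNIFORM_READ_BIT)
            flags |= GEN6_PIPE_CONTROL_CONSTANT_CACHE_INVALIDATE;
        if (input_mask & VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT)
            flags |= GEN6_PIPE_CONTROL_VF_CACHE_INVALIDATE;

        return flags;
    }
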
diff --git a/icd/intel/cmd_decode.c b/icd/intel/cmd_decode.c
index 944ef9f..333635d 100644
--- a/icd/intel/cmd_decode.c
+++ b/icd/intel/cmd_decode.c
@@ -604,7 +604,7 @@
 {
     int i;
 
-    assert(cmd->result == XGL_SUCCESS);
+    assert(cmd->result == VK_SUCCESS);
 
     for (i = 0; i < INTEL_CMD_WRITER_COUNT; i++)
         cmd_writer_decode(cmd, i, decode_inst_writer);
diff --git a/icd/intel/cmd_meta.c b/icd/intel/cmd_meta.c
index fb454da..ee042bd 100644
--- a/icd/intel/cmd_meta.c
+++ b/icd/intel/cmd_meta.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -31,19 +31,19 @@
 #include "state.h"
 #include "cmd_priv.h"
 
-static XGL_RESULT cmd_meta_create_buf_view(struct intel_cmd *cmd,
-                                           XGL_BUFFER buf,
-                                           XGL_GPU_SIZE range,
-                                           XGL_FORMAT format,
+static VK_RESULT cmd_meta_create_buf_view(struct intel_cmd *cmd,
+                                           VK_BUFFER buf,
+                                           VK_GPU_SIZE range,
+                                           VK_FORMAT format,
                                            struct intel_buf_view **view)
 {
-    XGL_BUFFER_VIEW_CREATE_INFO info;
-    XGL_GPU_SIZE stride;
+    VK_BUFFER_VIEW_CREATE_INFO info;
+    VK_GPU_SIZE stride;
 
     memset(&info, 0, sizeof(info));
-    info.sType = XGL_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
+    info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
     info.buffer = buf;
-    info.viewType = XGL_BUFFER_VIEW_TYPED;
+    info.viewType = VK_BUFFER_VIEW_TYPED;
     info.format = format;
     info.range = range;
 
@@ -60,15 +60,15 @@
 
 static void cmd_meta_set_src_for_buf(struct intel_cmd *cmd,
                                      const struct intel_buf *buf,
-                                     XGL_FORMAT format,
+                                     VK_FORMAT format,
                                      struct intel_cmd_meta *meta)
 {
     struct intel_buf_view *view;
-    XGL_RESULT res;
+    VK_RESULT res;
 
-    res = cmd_meta_create_buf_view(cmd, (XGL_BUFFER) buf,
+    res = cmd_meta_create_buf_view(cmd, (VK_BUFFER) buf,
             buf->size, format, &view);
-    if (res != XGL_SUCCESS) {
+    if (res != VK_SUCCESS) {
         cmd_fail(cmd, res);
         return;
     }
@@ -88,15 +88,15 @@
 
 static void cmd_meta_set_dst_for_buf(struct intel_cmd *cmd,
                                      const struct intel_buf *buf,
-                                     XGL_FORMAT format,
+                                     VK_FORMAT format,
                                      struct intel_cmd_meta *meta)
 {
     struct intel_buf_view *view;
-    XGL_RESULT res;
+    VK_RESULT res;
 
-    res = cmd_meta_create_buf_view(cmd, (XGL_BUFFER) buf,
+    res = cmd_meta_create_buf_view(cmd, (VK_BUFFER) buf,
             buf->size, format, &view);
-    if (res != XGL_SUCCESS) {
+    if (res != VK_SUCCESS) {
         cmd_fail(cmd, res);
         return;
     }
@@ -116,45 +116,45 @@
 
 static void cmd_meta_set_src_for_img(struct intel_cmd *cmd,
                                      const struct intel_img *img,
-                                     XGL_FORMAT format,
-                                     XGL_IMAGE_ASPECT aspect,
+                                     VK_FORMAT format,
+                                     VK_IMAGE_ASPECT aspect,
                                      struct intel_cmd_meta *meta)
 {
-    XGL_IMAGE_VIEW_CREATE_INFO info;
+    VK_IMAGE_VIEW_CREATE_INFO info;
     struct intel_img_view *view;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     memset(&info, 0, sizeof(info));
-    info.sType = XGL_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
-    info.image = (XGL_IMAGE) img;
+    info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+    info.image = (VK_IMAGE) img;
 
     switch (img->type) {
-    case XGL_IMAGE_1D:
-        info.viewType = XGL_IMAGE_VIEW_1D;
+    case VK_IMAGE_1D:
+        info.viewType = VK_IMAGE_VIEW_1D;
         break;
-    case XGL_IMAGE_2D:
-        info.viewType = XGL_IMAGE_VIEW_2D;
+    case VK_IMAGE_2D:
+        info.viewType = VK_IMAGE_VIEW_2D;
         break;
-    case XGL_IMAGE_3D:
-        info.viewType = XGL_IMAGE_VIEW_3D;
+    case VK_IMAGE_3D:
+        info.viewType = VK_IMAGE_VIEW_3D;
         break;
     default:
         break;
     }
 
     info.format = format;
-    info.channels.r = XGL_CHANNEL_SWIZZLE_R;
-    info.channels.g = XGL_CHANNEL_SWIZZLE_G;
-    info.channels.b = XGL_CHANNEL_SWIZZLE_B;
-    info.channels.a = XGL_CHANNEL_SWIZZLE_A;
+    info.channels.r = VK_CHANNEL_SWIZZLE_R;
+    info.channels.g = VK_CHANNEL_SWIZZLE_G;
+    info.channels.b = VK_CHANNEL_SWIZZLE_B;
+    info.channels.a = VK_CHANNEL_SWIZZLE_A;
     info.subresourceRange.aspect = aspect;
     info.subresourceRange.baseMipLevel = 0;
-    info.subresourceRange.mipLevels = XGL_LAST_MIP_OR_SLICE;
+    info.subresourceRange.mipLevels = VK_LAST_MIP_OR_SLICE;
     info.subresourceRange.baseArraySlice = 0;
-    info.subresourceRange.arraySize = XGL_LAST_MIP_OR_SLICE;
+    info.subresourceRange.arraySize = VK_LAST_MIP_OR_SLICE;
 
     ret = intel_img_view_create(cmd->dev, &info, &view);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         cmd_fail(cmd, ret);
         return;
     }
@@ -236,24 +236,24 @@
 
 static void cmd_meta_set_dst_for_img(struct intel_cmd *cmd,
                                      const struct intel_img *img,
-                                     XGL_FORMAT format,
+                                     VK_FORMAT format,
                                      uint32_t lod, uint32_t layer,
                                      struct intel_cmd_meta *meta)
 {
-    XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO info;
+    VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO info;
     struct intel_rt_view *rt;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     memset(&info, 0, sizeof(info));
-    info.sType = XGL_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO;
-    info.image = (XGL_IMAGE) img;
+    info.sType = VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO;
+    info.image = (VK_IMAGE) img;
     info.format = format;
     info.mipLevel = lod;
     info.baseArraySlice = layer;
     info.arraySize = 1;
 
     ret = intel_rt_view_create(cmd->dev, &info, &rt);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         cmd_fail(cmd, ret);
         return;
     }
@@ -275,16 +275,16 @@
 
 static void cmd_meta_set_src_for_writer(struct intel_cmd *cmd,
                                         enum intel_cmd_writer_type writer,
-                                        XGL_GPU_SIZE size,
-                                        XGL_FORMAT format,
+                                        VK_GPU_SIZE size,
+                                        VK_FORMAT format,
                                         struct intel_cmd_meta *meta)
 {
     struct intel_buf_view *view;
-    XGL_RESULT res;
+    VK_RESULT res;
 
-    res = cmd_meta_create_buf_view(cmd, (XGL_BUFFER) XGL_NULL_HANDLE,
+    res = cmd_meta_create_buf_view(cmd, (VK_BUFFER) VK_NULL_HANDLE,
             size, format, &view);
-    if (res != XGL_SUCCESS) {
+    if (res != VK_SUCCESS) {
         cmd_fail(cmd, res);
         return;
     }
@@ -307,19 +307,19 @@
                                  uint32_t lod, uint32_t layer,
                                  struct intel_cmd_meta *meta)
 {
-    XGL_DEPTH_STENCIL_VIEW_CREATE_INFO info;
+    VK_DEPTH_STENCIL_VIEW_CREATE_INFO info;
     struct intel_ds_view *ds;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     memset(&info, 0, sizeof(info));
-    info.sType = XGL_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO;
-    info.image = (XGL_IMAGE) img;
+    info.sType = VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO;
+    info.image = (VK_IMAGE) img;
     info.mipLevel = lod;
     info.baseArraySlice = layer;
     info.arraySize = 1;
 
     ret = intel_ds_view_create(cmd->dev, &info, &ds);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         cmd_fail(cmd, ret);
         return;
     }
@@ -328,7 +328,7 @@
 }
 
 static void cmd_meta_set_ds_state(struct intel_cmd *cmd,
-                                  XGL_IMAGE_ASPECT aspect,
+                                  VK_IMAGE_ASPECT aspect,
                                   uint32_t stencil_ref,
                                   struct intel_cmd_meta *meta)
 {
@@ -343,16 +343,16 @@
     enum intel_dev_meta_shader shader_id;
 
     switch (img->type) {
-    case XGL_IMAGE_1D:
+    case VK_IMAGE_1D:
         shader_id = (copy_array) ?
             INTEL_DEV_META_FS_COPY_1D_ARRAY : INTEL_DEV_META_FS_COPY_1D;
         break;
-    case XGL_IMAGE_2D:
+    case VK_IMAGE_2D:
         shader_id = (img->samples > 1) ? INTEL_DEV_META_FS_COPY_2D_MS :
                     (copy_array) ?  INTEL_DEV_META_FS_COPY_2D_ARRAY :
                     INTEL_DEV_META_FS_COPY_2D;
         break;
-    case XGL_IMAGE_3D:
+    case VK_IMAGE_3D:
     default:
         shader_id = INTEL_DEV_META_FS_COPY_2D_ARRAY;
         break;
@@ -362,53 +362,53 @@
 }
 
 static bool cmd_meta_mem_dword_aligned(const struct intel_cmd *cmd,
-                                       XGL_GPU_SIZE src_offset,
-                                       XGL_GPU_SIZE dst_offset,
-                                       XGL_GPU_SIZE size)
+                                       VK_GPU_SIZE src_offset,
+                                       VK_GPU_SIZE dst_offset,
+                                       VK_GPU_SIZE size)
 {
     return !((src_offset | dst_offset | size) & 0x3);
 }
 
-static XGL_FORMAT cmd_meta_img_raw_format(const struct intel_cmd *cmd,
-                                          XGL_FORMAT format)
+static VK_FORMAT cmd_meta_img_raw_format(const struct intel_cmd *cmd,
+                                          VK_FORMAT format)
 {
     switch (icd_format_get_size(format)) {
     case 1:
-        format = XGL_FMT_R8_UINT;
+        format = VK_FMT_R8_UINT;
         break;
     case 2:
-        format = XGL_FMT_R16_UINT;
+        format = VK_FMT_R16_UINT;
         break;
     case 4:
-        format = XGL_FMT_R32_UINT;
+        format = VK_FMT_R32_UINT;
         break;
     case 8:
-        format = XGL_FMT_R32G32_UINT;
+        format = VK_FMT_R32G32_UINT;
         break;
     case 16:
-        format = XGL_FMT_R32G32B32A32_UINT;
+        format = VK_FMT_R32G32B32A32_UINT;
         break;
     default:
         assert(!"unsupported image format for raw blit op");
-        format = XGL_FMT_UNDEFINED;
+        format = VK_FMT_UNDEFINED;
         break;
     }
 
     return format;
 }
 
-ICD_EXPORT void XGLAPI xglCmdCopyBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  srcBuffer,
-    XGL_BUFFER                                  destBuffer,
+ICD_EXPORT void VKAPI vkCmdCopyBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  srcBuffer,
+    VK_BUFFER                                  destBuffer,
     uint32_t                                    regionCount,
-    const XGL_BUFFER_COPY*                      pRegions)
+    const VK_BUFFER_COPY*                      pRegions)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_buf *src = intel_buf(srcBuffer);
     struct intel_buf *dst = intel_buf(destBuffer);
     struct intel_cmd_meta meta;
-    XGL_FORMAT format;
+    VK_FORMAT format;
     uint32_t i;
 
     memset(&meta, 0, sizeof(meta));
@@ -417,11 +417,11 @@
     meta.height = 1;
     meta.samples = 1;
 
-    format = XGL_FMT_UNDEFINED;
+    format = VK_FMT_UNDEFINED;
 
     for (i = 0; i < regionCount; i++) {
-        const XGL_BUFFER_COPY *region = &pRegions[i];
-        XGL_FORMAT fmt;
+        const VK_BUFFER_COPY *region = &pRegions[i];
+        VK_FORMAT fmt;
 
         meta.src.x = region->srcOffset;
         meta.dst.x = region->destOffset;
@@ -438,13 +438,13 @@
              * INTEL_DEV_META_VS_COPY_MEM is untyped but expects the stride to
              * be 16
              */
-            fmt = XGL_FMT_R32G32B32A32_UINT;
+            fmt = VK_FMT_R32G32B32A32_UINT;
         } else {
             if (cmd_gen(cmd) == INTEL_GEN(6)) {
-                intel_dev_log(cmd->dev, XGL_DBG_MSG_ERROR,
-                        XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
-                        "unaligned xglCmdCopyBuffer unsupported");
-                cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+                intel_dev_log(cmd->dev, VK_DBG_MSG_ERROR,
+                        VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
+                        "unaligned vkCmdCopyBuffer unsupported");
+                cmd_fail(cmd, VK_ERROR_UNKNOWN);
                 continue;
             }
 
@@ -454,7 +454,7 @@
              * INTEL_DEV_META_VS_COPY_MEM_UNALIGNED is untyped but expects the
              * stride to be 4
              */
-            fmt = XGL_FMT_R8G8B8A8_UINT;
+            fmt = VK_FMT_R8G8B8A8_UINT;
         }
 
         if (format != fmt) {
@@ -468,25 +468,25 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdCopyImage(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   srcImage,
-    XGL_IMAGE_LAYOUT                            srcImageLayout,
-    XGL_IMAGE                                   destImage,
-    XGL_IMAGE_LAYOUT                            destImageLayout,
+ICD_EXPORT void VKAPI vkCmdCopyImage(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   srcImage,
+    VK_IMAGE_LAYOUT                            srcImageLayout,
+    VK_IMAGE                                   destImage,
+    VK_IMAGE_LAYOUT                            destImageLayout,
     uint32_t                                    regionCount,
-    const XGL_IMAGE_COPY*                       pRegions)
+    const VK_IMAGE_COPY*                       pRegions)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_img *src = intel_img(srcImage);
     struct intel_img *dst = intel_img(destImage);
     struct intel_cmd_meta meta;
-    XGL_FORMAT raw_format;
+    VK_FORMAT raw_format;
     bool raw_copy = false;
     uint32_t i;
 
     if (src->type != dst->type) {
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -495,7 +495,7 @@
         raw_format = cmd_meta_img_raw_format(cmd, src->layout.format);
     } else if (icd_format_is_compressed(src->layout.format) ||
                icd_format_is_compressed(dst->layout.format)) {
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -504,12 +504,12 @@
 
     cmd_meta_set_src_for_img(cmd, src,
             (raw_copy) ? raw_format : src->layout.format,
-            XGL_IMAGE_ASPECT_COLOR, &meta);
+            VK_IMAGE_ASPECT_COLOR, &meta);
 
     meta.samples = dst->samples;
 
     for (i = 0; i < regionCount; i++) {
-        const XGL_IMAGE_COPY *region = &pRegions[i];
+        const VK_IMAGE_COPY *region = &pRegions[i];
         uint32_t j;
 
         meta.shader_id = get_shader_id(cmd->dev, src,
@@ -555,36 +555,36 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdBlitImage(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   srcImage,
-    XGL_IMAGE_LAYOUT                            srcImageLayout,
-    XGL_IMAGE                                   destImage,
-    XGL_IMAGE_LAYOUT                            destImageLayout,
+ICD_EXPORT void VKAPI vkCmdBlitImage(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   srcImage,
+    VK_IMAGE_LAYOUT                            srcImageLayout,
+    VK_IMAGE                                   destImage,
+    VK_IMAGE_LAYOUT                            destImageLayout,
     uint32_t                                    regionCount,
-    const XGL_IMAGE_BLIT*                       pRegions)
+    const VK_IMAGE_BLIT*                       pRegions)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
     /*
      * TODO: Implement actual blit function.
      */
-    cmd_fail(cmd, XGL_ERROR_UNAVAILABLE);
+    cmd_fail(cmd, VK_ERROR_UNAVAILABLE);
 }
 
-ICD_EXPORT void XGLAPI xglCmdCopyBufferToImage(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  srcBuffer,
-    XGL_IMAGE                                   destImage,
-    XGL_IMAGE_LAYOUT                            destImageLayout,
+ICD_EXPORT void VKAPI vkCmdCopyBufferToImage(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  srcBuffer,
+    VK_IMAGE                                   destImage,
+    VK_IMAGE_LAYOUT                            destImageLayout,
     uint32_t                                    regionCount,
-    const XGL_BUFFER_IMAGE_COPY*                pRegions)
+    const VK_BUFFER_IMAGE_COPY*                pRegions)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_buf *buf = intel_buf(srcBuffer);
     struct intel_img *img = intel_img(destImage);
     struct intel_cmd_meta meta;
-    XGL_FORMAT format;
+    VK_FORMAT format;
     uint32_t block_width, i;
 
     memset(&meta, 0, sizeof(meta));
@@ -598,7 +598,7 @@
     cmd_meta_set_src_for_buf(cmd, buf, format, &meta);
 
     for (i = 0; i < regionCount; i++) {
-        const XGL_BUFFER_IMAGE_COPY *region = &pRegions[i];
+        const VK_BUFFER_IMAGE_COPY *region = &pRegions[i];
         uint32_t j;
 
         meta.src.x = region->bufferOffset / icd_format_get_size(format);
@@ -624,19 +624,19 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdCopyImageToBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   srcImage,
-    XGL_IMAGE_LAYOUT                            srcImageLayout,
-    XGL_BUFFER                                  destBuffer,
+ICD_EXPORT void VKAPI vkCmdCopyImageToBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   srcImage,
+    VK_IMAGE_LAYOUT                            srcImageLayout,
+    VK_BUFFER                                  destBuffer,
     uint32_t                                    regionCount,
-    const XGL_BUFFER_IMAGE_COPY*                pRegions)
+    const VK_BUFFER_IMAGE_COPY*                pRegions)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_img *img = intel_img(srcImage);
     struct intel_buf *buf = intel_buf(destBuffer);
     struct intel_cmd_meta meta;
-    XGL_FORMAT img_format, buf_format;
+    VK_FORMAT img_format, buf_format;
     uint32_t block_width, i;
 
     memset(&meta, 0, sizeof(meta));
@@ -647,51 +647,51 @@
 
     /* buf_format is ignored by hw, but we derive stride from it */
     switch (img_format) {
-    case XGL_FMT_R8_UINT:
+    case VK_FMT_R8_UINT:
         meta.shader_id = INTEL_DEV_META_VS_COPY_R8_TO_MEM;
-        buf_format = XGL_FMT_R8G8B8A8_UINT;
+        buf_format = VK_FMT_R8G8B8A8_UINT;
         break;
-    case XGL_FMT_R16_UINT:
+    case VK_FMT_R16_UINT:
         meta.shader_id = INTEL_DEV_META_VS_COPY_R16_TO_MEM;
-        buf_format = XGL_FMT_R8G8B8A8_UINT;
+        buf_format = VK_FMT_R8G8B8A8_UINT;
         break;
-    case XGL_FMT_R32_UINT:
+    case VK_FMT_R32_UINT:
         meta.shader_id = INTEL_DEV_META_VS_COPY_R32_TO_MEM;
-        buf_format = XGL_FMT_R32G32B32A32_UINT;
+        buf_format = VK_FMT_R32G32B32A32_UINT;
         break;
-    case XGL_FMT_R32G32_UINT:
+    case VK_FMT_R32G32_UINT:
         meta.shader_id = INTEL_DEV_META_VS_COPY_R32G32_TO_MEM;
-        buf_format = XGL_FMT_R32G32B32A32_UINT;
+        buf_format = VK_FMT_R32G32B32A32_UINT;
         break;
-    case XGL_FMT_R32G32B32A32_UINT:
+    case VK_FMT_R32G32B32A32_UINT:
         meta.shader_id = INTEL_DEV_META_VS_COPY_R32G32B32A32_TO_MEM;
-        buf_format = XGL_FMT_R32G32B32A32_UINT;
+        buf_format = VK_FMT_R32G32B32A32_UINT;
         break;
     default:
-        img_format = XGL_FMT_UNDEFINED;
-        buf_format = XGL_FMT_UNDEFINED;
+        img_format = VK_FMT_UNDEFINED;
+        buf_format = VK_FMT_UNDEFINED;
         break;
     }
 
-    if (img_format == XGL_FMT_UNDEFINED ||
+    if (img_format == VK_FMT_UNDEFINED ||
         (cmd_gen(cmd) == INTEL_GEN(6) &&
          icd_format_get_size(img_format) < 4)) {
-        intel_dev_log(cmd->dev, XGL_DBG_MSG_ERROR,
-                XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
-                "xglCmdCopyImageToBuffer with bpp %d unsupported",
+        intel_dev_log(cmd->dev, VK_DBG_MSG_ERROR,
+                VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
+                "vkCmdCopyImageToBuffer with bpp %d unsupported",
                 icd_format_get_size(img->layout.format));
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
     cmd_meta_set_src_for_img(cmd, img, img_format,
-            XGL_IMAGE_ASPECT_COLOR, &meta);
+            VK_IMAGE_ASPECT_COLOR, &meta);
     cmd_meta_set_dst_for_buf(cmd, buf, buf_format, &meta);
 
     meta.samples = 1;
 
     for (i = 0; i < regionCount; i++) {
-        const XGL_BUFFER_IMAGE_COPY *region = &pRegions[i];
+        const VK_BUFFER_IMAGE_COPY *region = &pRegions[i];
         uint32_t j;
 
         meta.src.lod = region->imageSubresource.mipLevel;
@@ -713,36 +713,36 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdCloneImageData(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   srcImage,
-    XGL_IMAGE_LAYOUT                            srcImageLayout,
-    XGL_IMAGE                                   destImage,
-    XGL_IMAGE_LAYOUT                            destImageLayout)
+ICD_EXPORT void VKAPI vkCmdCloneImageData(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   srcImage,
+    VK_IMAGE_LAYOUT                            srcImageLayout,
+    VK_IMAGE                                   destImage,
+    VK_IMAGE_LAYOUT                            destImageLayout)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_img *src = intel_img(srcImage);
     struct intel_img *dst = intel_img(destImage);
     struct intel_buf *src_buf, *dst_buf;
-    XGL_BUFFER_CREATE_INFO buf_info;
-    XGL_BUFFER_COPY buf_region;
-    XGL_RESULT res;
+    VK_BUFFER_CREATE_INFO buf_info;
+    VK_BUFFER_COPY buf_region;
+    VK_RESULT res;
 
     memset(&buf_info, 0, sizeof(buf_info));
-    buf_info.sType = XGL_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
     buf_info.size = src->obj.mem->size;
 
     memset(&buf_region, 0, sizeof(buf_region));
     buf_region.copySize = src->obj.mem->size;
 
     res = intel_buf_create(cmd->dev, &buf_info, &src_buf);
-    if (res != XGL_SUCCESS) {
+    if (res != VK_SUCCESS) {
         cmd_fail(cmd, res);
         return;
     }
 
     res = intel_buf_create(cmd->dev, &buf_info, &dst_buf);
-    if (res != XGL_SUCCESS) {
+    if (res != VK_SUCCESS) {
         intel_buf_destroy(src_buf);
         cmd_fail(cmd, res);
         return;
@@ -752,30 +752,30 @@
     intel_obj_bind_mem(&dst_buf->obj, dst->obj.mem, 0);
 
     cmd_batch_flush(cmd, GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH);
-    xglCmdCopyBuffer(cmdBuffer, (XGL_BUFFER) src_buf,
-            (XGL_BUFFER) dst_buf, 1, &buf_region);
+    vkCmdCopyBuffer(cmdBuffer, (VK_BUFFER) src_buf,
+            (VK_BUFFER) dst_buf, 1, &buf_region);
 
     intel_buf_destroy(src_buf);
     intel_buf_destroy(dst_buf);
 }
 
-ICD_EXPORT void XGLAPI xglCmdUpdateBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  destBuffer,
-    XGL_GPU_SIZE                                destOffset,
-    XGL_GPU_SIZE                                dataSize,
+ICD_EXPORT void VKAPI vkCmdUpdateBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  destBuffer,
+    VK_GPU_SIZE                                destOffset,
+    VK_GPU_SIZE                                dataSize,
     const uint32_t*                             pData)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_buf *dst = intel_buf(destBuffer);
     struct intel_cmd_meta meta;
-    XGL_FORMAT format;
+    VK_FORMAT format;
     uint32_t *ptr;
     uint32_t offset;
 
     /* must be 4-byte aligned */
     if ((destOffset | dataSize) & 3) {
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -798,7 +798,7 @@
     /*
      * INTEL_DEV_META_VS_COPY_MEM is untyped but expects the stride to be 16
      */
-    format = XGL_FMT_R32G32B32A32_UINT;
+    format = VK_FMT_R32G32B32A32_UINT;
 
     cmd_meta_set_src_for_writer(cmd, INTEL_CMD_WRITER_STATE,
             offset + dataSize, format, &meta);
@@ -807,21 +807,21 @@
     cmd_draw_meta(cmd, &meta);
 }
 
-ICD_EXPORT void XGLAPI xglCmdFillBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  destBuffer,
-    XGL_GPU_SIZE                                destOffset,
-    XGL_GPU_SIZE                                fillSize,
+ICD_EXPORT void VKAPI vkCmdFillBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  destBuffer,
+    VK_GPU_SIZE                                destOffset,
+    VK_GPU_SIZE                                fillSize,
     uint32_t                                    data)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_buf *dst = intel_buf(destBuffer);
     struct intel_cmd_meta meta;
-    XGL_FORMAT format;
+    VK_FORMAT format;
 
     /* must be 4-byte aligned */
     if ((destOffset | fillSize) & 3) {
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -840,7 +840,7 @@
     /*
      * INTEL_DEV_META_VS_FILL_MEM is untyped but expects the stride to be 16
      */
-    format = XGL_FMT_R32G32B32A32_UINT;
+    format = VK_FMT_R32G32B32A32_UINT;
 
     cmd_meta_set_dst_for_buf(cmd, dst, format, &meta);
 
@@ -849,9 +849,9 @@
 
 static void cmd_meta_clear_image(struct intel_cmd *cmd,
                                  struct intel_img *img,
-                                 XGL_FORMAT format,
+                                 VK_FORMAT format,
                                  struct intel_cmd_meta *meta,
-                                 const XGL_IMAGE_SUBRESOURCE_RANGE *range)
+                                 const VK_IMAGE_SUBRESOURCE_RANGE *range)
 {
     uint32_t mip_levels, array_size;
     uint32_t i, j;
@@ -881,7 +881,7 @@
             continue;
 
         for (j = 0; j < array_size; j++) {
-            if (range->aspect == XGL_IMAGE_ASPECT_COLOR) {
+            if (range->aspect == VK_IMAGE_ASPECT_COLOR) {
                 cmd_meta_set_dst_for_img(cmd, img, format,
                         meta->dst.lod, meta->dst.layer, meta);
 
@@ -905,38 +905,38 @@
 void cmd_meta_ds_op(struct intel_cmd *cmd,
                     enum intel_cmd_meta_ds_op op,
                     struct intel_img *img,
-                    const XGL_IMAGE_SUBRESOURCE_RANGE *range)
+                    const VK_IMAGE_SUBRESOURCE_RANGE *range)
 {
     struct intel_cmd_meta meta;
 
     if (img->layout.aux != INTEL_LAYOUT_AUX_HIZ)
         return;
-    if (range->aspect != XGL_IMAGE_ASPECT_DEPTH)
+    if (range->aspect != VK_IMAGE_ASPECT_DEPTH)
         return;
 
     memset(&meta, 0, sizeof(meta));
     meta.mode = INTEL_CMD_META_DEPTH_STENCIL_RECT;
     meta.samples = img->samples;
 
-    meta.ds.aspect = XGL_IMAGE_ASPECT_DEPTH;
+    meta.ds.aspect = VK_IMAGE_ASPECT_DEPTH;
     meta.ds.op = op;
     meta.ds.optimal = true;
 
     cmd_meta_clear_image(cmd, img, img->layout.format, &meta, range);
 }
 
-ICD_EXPORT void XGLAPI xglCmdClearColorImage(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   image,
-    XGL_IMAGE_LAYOUT                            imageLayout,
-    XGL_CLEAR_COLOR                             clearColor,
+ICD_EXPORT void VKAPI vkCmdClearColorImage(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   image,
+    VK_IMAGE_LAYOUT                            imageLayout,
+    VK_CLEAR_COLOR                             clearColor,
     uint32_t                                    rangeCount,
-    const XGL_IMAGE_SUBRESOURCE_RANGE*          pRanges)
+    const VK_IMAGE_SUBRESOURCE_RANGE*          pRanges)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_img *img = intel_img(image);
     struct intel_cmd_meta meta;
-    XGL_FORMAT format;
+    VK_FORMAT format;
     uint32_t i;
 
     memset(&meta, 0, sizeof(meta));
@@ -961,14 +961,14 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdClearDepthStencil(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   image,
-    XGL_IMAGE_LAYOUT                            imageLayout,
+ICD_EXPORT void VKAPI vkCmdClearDepthStencil(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   image,
+    VK_IMAGE_LAYOUT                            imageLayout,
     float                                       depth,
     uint32_t                                    stencil,
     uint32_t                                    rangeCount,
-    const XGL_IMAGE_SUBRESOURCE_RANGE*          pRanges)
+    const VK_IMAGE_SUBRESOURCE_RANGE*          pRanges)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_img *img = intel_img(image);
@@ -984,39 +984,39 @@
     meta.clear_val[0] = u_fui(depth);
     meta.clear_val[1] = stencil;
 
-    if (imageLayout == XGL_IMAGE_LAYOUT_CLEAR_OPTIMAL ||
-        imageLayout == XGL_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
-        imageLayout == XGL_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) {
+    if (imageLayout == VK_IMAGE_LAYOUT_CLEAR_OPTIMAL ||
+        imageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
+        imageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) {
         meta.ds.optimal = true;
     }
 
     for (i = 0; i < rangeCount; i++) {
-        const XGL_IMAGE_SUBRESOURCE_RANGE *range = &pRanges[i];
+        const VK_IMAGE_SUBRESOURCE_RANGE *range = &pRanges[i];
 
         cmd_meta_clear_image(cmd, img, img->layout.format,
                 &meta, range);
     }
 }
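
The `u_fui(depth)` call above stores the float depth clear value as its raw bit pattern so it can sit next to the integer stencil value in `clear_val[]`. Assuming u_fui is the usual float-to-uint bit cast, it can be written portably like this (our naming):

```
#include <stdint.h>
#include <string.h>

/* Bit-cast a float to its IEEE-754 encoding without violating strict
 * aliasing; e.g. float_to_bits(1.0f) == 0x3f800000. */
static uint32_t float_to_bits(float f)
{
    uint32_t u;
    memcpy(&u, &f, sizeof(u));
    return u;
}
```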
 
-ICD_EXPORT void XGLAPI xglCmdResolveImage(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_IMAGE                                   srcImage,
-    XGL_IMAGE_LAYOUT                            srcImageLayout,
-    XGL_IMAGE                                   destImage,
-    XGL_IMAGE_LAYOUT                            destImageLayout,
+ICD_EXPORT void VKAPI vkCmdResolveImage(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_IMAGE                                   srcImage,
+    VK_IMAGE_LAYOUT                            srcImageLayout,
+    VK_IMAGE                                   destImage,
+    VK_IMAGE_LAYOUT                            destImageLayout,
     uint32_t                                    rectCount,
-    const XGL_IMAGE_RESOLVE*                    pRects)
+    const VK_IMAGE_RESOLVE*                    pRects)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_img *src = intel_img(srcImage);
     struct intel_img *dst = intel_img(destImage);
     struct intel_cmd_meta meta;
-    XGL_FORMAT format;
+    VK_FORMAT format;
     uint32_t i;
 
     if (src->samples <= 1 || dst->samples > 1 ||
         src->layout.format != dst->layout.format) {
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -1042,10 +1042,10 @@
     meta.samples = 1;
 
     format = cmd_meta_img_raw_format(cmd, src->layout.format);
-    cmd_meta_set_src_for_img(cmd, src, format, XGL_IMAGE_ASPECT_COLOR, &meta);
+    cmd_meta_set_src_for_img(cmd, src, format, VK_IMAGE_ASPECT_COLOR, &meta);
 
     for (i = 0; i < rectCount; i++) {
-        const XGL_IMAGE_RESOLVE *rect = &pRects[i];
+        const VK_IMAGE_RESOLVE *rect = &pRects[i];
 
         meta.src.lod = rect->srcSubresource.mipLevel;
         meta.src.layer = rect->srcSubresource.arraySlice;
diff --git a/icd/intel/cmd_mi.c b/icd/intel/cmd_mi.c
index 5f7997a..e628299 100644
--- a/icd/intel/cmd_mi.c
+++ b/icd/intel/cmd_mi.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -87,7 +87,7 @@
 
 static void cmd_query_pipeline_statistics(struct intel_cmd *cmd,
                                           struct intel_bo *bo,
-                                          XGL_GPU_SIZE offset)
+                                          VK_GPU_SIZE offset)
 {
     const uint32_t regs[] = {
         GEN6_REG_PS_INVOCATION_COUNT,
@@ -121,57 +121,57 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdBeginQuery(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_QUERY_POOL                              queryPool,
+ICD_EXPORT void VKAPI vkCmdBeginQuery(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_QUERY_POOL                              queryPool,
     uint32_t                                    slot,
-    XGL_FLAGS                                   flags)
+    VK_FLAGS                                   flags)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_query *query = intel_query(queryPool);
     struct intel_bo *bo = query->obj.mem->bo;
-    const XGL_GPU_SIZE offset = query->slot_stride * slot;
+    const VK_GPU_SIZE offset = query->slot_stride * slot;
 
     switch (query->type) {
-    case XGL_QUERY_OCCLUSION:
+    case VK_QUERY_OCCLUSION:
         cmd_batch_depth_count(cmd, bo, offset);
         break;
-    case XGL_QUERY_PIPELINE_STATISTICS:
+    case VK_QUERY_PIPELINE_STATISTICS:
         cmd_query_pipeline_statistics(cmd, bo, offset);
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         break;
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdEndQuery(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_QUERY_POOL                              queryPool,
+ICD_EXPORT void VKAPI vkCmdEndQuery(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_QUERY_POOL                              queryPool,
     uint32_t                                    slot)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_query *query = intel_query(queryPool);
     struct intel_bo *bo = query->obj.mem->bo;
-    const XGL_GPU_SIZE offset = query->slot_stride * slot;
+    const VK_GPU_SIZE offset = query->slot_stride * slot;
 
     switch (query->type) {
-    case XGL_QUERY_OCCLUSION:
+    case VK_QUERY_OCCLUSION:
         cmd_batch_depth_count(cmd, bo, offset + sizeof(uint64_t));
         break;
-    case XGL_QUERY_PIPELINE_STATISTICS:
+    case VK_QUERY_PIPELINE_STATISTICS:
         cmd_query_pipeline_statistics(cmd, bo,
-                offset + sizeof(XGL_PIPELINE_STATISTICS_DATA));
+                offset + sizeof(VK_PIPELINE_STATISTICS_DATA));
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         break;
     }
 }
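
Note the begin/end symmetry above: vkCmdBeginQuery writes its counters at `slot_stride * slot`, and vkCmdEndQuery writes a second copy one value further on (`+ sizeof(uint64_t)` for occlusion, `+ sizeof(VK_PIPELINE_STATISTICS_DATA)` for pipeline statistics), so a result is the difference of the pair. A host-side sketch of the occlusion layout this implies (struct name is ours):

```
#include <stdint.h>

/* Each occlusion slot holds a begin/end pair of 64-bit depth counts;
 * the query result is simply the difference. */
struct occlusion_slot {
    uint64_t begin;   /* written by vkCmdBeginQuery */
    uint64_t end;     /* written by vkCmdEndQuery   */
};

static uint64_t occlusion_result(const struct occlusion_slot *slots, uint32_t slot)
{
    return slots[slot].end - slots[slot].begin;
}
```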
 
-ICD_EXPORT void XGLAPI xglCmdResetQueryPool(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_QUERY_POOL                              queryPool,
+ICD_EXPORT void VKAPI vkCmdResetQueryPool(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_QUERY_POOL                              queryPool,
     uint32_t                                    startQuery,
     uint32_t                                    queryCount)
 {
@@ -179,40 +179,40 @@
 }
 
 static void cmd_write_event_value(struct intel_cmd *cmd, struct intel_event *event,
-                            XGL_PIPE_EVENT pipeEvent, uint32_t value)
+                            VK_PIPE_EVENT pipeEvent, uint32_t value)
 {
     uint32_t pipe_control_flags;
 
     /* Event setting is done with PIPE_CONTROL post-sync write immediate.
-     * With no other PIPE_CONTROL flags set, it behaves as XGL_PIPE_EVENT_TOP_OF_PIPE.
-     * All other pipeEvent values will behave as XGL_PIPE_EVENT_GPU_COMMANDS_COMPLETE.
+     * With no other PIPE_CONTROL flags set, it behaves as VK_PIPE_EVENT_TOP_OF_PIPE.
+     * All other pipeEvent values will behave as VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE.
      */
     switch(pipeEvent)
     {
-    case XGL_PIPE_EVENT_TOP_OF_PIPE:
+    case VK_PIPE_EVENT_TOP_OF_PIPE:
         pipe_control_flags = 0;
         break;
-    case XGL_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE:
-    case XGL_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE:
-    case XGL_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE:
-    case XGL_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE:
-    case XGL_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE:
-    case XGL_PIPE_EVENT_TRANSFER_COMPLETE:
-    case XGL_PIPE_EVENT_GPU_COMMANDS_COMPLETE:
+    case VK_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE:
+    case VK_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE:
+    case VK_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE:
+    case VK_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE:
+    case VK_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE:
+    case VK_PIPE_EVENT_TRANSFER_COMPLETE:
+    case VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE:
         pipe_control_flags = GEN6_PIPE_CONTROL_CS_STALL;
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
         break;
     }
     cmd_batch_immediate(cmd, pipe_control_flags, event->obj.mem->bo, 0, value);
 }
 
-ICD_EXPORT void XGLAPI xglCmdSetEvent(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_EVENT                                   event_,
-    XGL_PIPE_EVENT                              pipeEvent)
+ICD_EXPORT void VKAPI vkCmdSetEvent(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_EVENT                                   event_,
+    VK_PIPE_EVENT                              pipeEvent)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_event *event = intel_event(event_);
@@ -220,10 +220,10 @@
     cmd_write_event_value(cmd, event, pipeEvent, 1);
 }
 
-ICD_EXPORT void XGLAPI xglCmdResetEvent(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_EVENT                                   event_,
-    XGL_PIPE_EVENT                              pipeEvent)
+ICD_EXPORT void VKAPI vkCmdResetEvent(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_EVENT                                   event_,
+    VK_PIPE_EVENT                              pipeEvent)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_event *event = intel_event(event_);
@@ -231,28 +231,28 @@
     cmd_write_event_value(cmd, event, pipeEvent, 0);
 }
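
vkCmdSetEvent and vkCmdResetEvent differ only in the immediate value handed to cmd_write_event_value (1 to set, 0 to reset), so the event is conceptually just a word in the event object's memory that either side can poll. A host-side analogue (illustrative names, not driver code):

```
#include <stdatomic.h>
#include <stdint.h>

/* The GPU writes this word via a PIPE_CONTROL post-sync immediate. */
typedef struct { _Atomic uint32_t word; } demo_event;

static void demo_event_set(demo_event *e)    { atomic_store(&e->word, 1); }
static void demo_event_reset(demo_event *e)  { atomic_store(&e->word, 0); }
static int  demo_event_is_set(demo_event *e) { return atomic_load(&e->word) == 1; }
```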
 
-ICD_EXPORT void XGLAPI xglCmdWriteTimestamp(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_TIMESTAMP_TYPE                          timestampType,
-    XGL_BUFFER                                  destBuffer,
-    XGL_GPU_SIZE                                destOffset)
+ICD_EXPORT void VKAPI vkCmdWriteTimestamp(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_TIMESTAMP_TYPE                          timestampType,
+    VK_BUFFER                                  destBuffer,
+    VK_GPU_SIZE                                destOffset)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_buf *buf = intel_buf(destBuffer);
 
     switch (timestampType) {
-    case XGL_TIMESTAMP_TOP:
+    case VK_TIMESTAMP_TOP:
         /* XXX we are not supposed to use two commands... */
         gen6_MI_STORE_REGISTER_MEM(cmd, buf->obj.mem->bo,
                 destOffset, GEN6_REG_TIMESTAMP);
         gen6_MI_STORE_REGISTER_MEM(cmd, buf->obj.mem->bo,
                 destOffset + 4, GEN6_REG_TIMESTAMP + 4);
         break;
-    case XGL_TIMESTAMP_BOTTOM:
+    case VK_TIMESTAMP_BOTTOM:
         cmd_batch_timestamp(cmd, buf->obj.mem->bo, destOffset);
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_INVALID_VALUE);
+        cmd_fail(cmd, VK_ERROR_INVALID_VALUE);
         break;
     }
 }
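
As the XXX comment concedes, the VK_TIMESTAMP_TOP path samples the 64-bit GEN6_REG_TIMESTAMP as two separate 32-bit MI_STORE_REGISTER_MEM writes, so the halves are not captured atomically and can tear across a carry; the VK_TIMESTAMP_BOTTOM path avoids this with a single PIPE_CONTROL timestamp write. Reassembling the stored value on the CPU, assuming the low register dword lands at destOffset:

```
#include <stdint.h>

/* destOffset holds the low dword, destOffset + 4 the high one (assumed
 * little-endian register layout); beware that the two stores above are
 * not atomic with respect to the counter ticking. */
static uint64_t combine_timestamp(uint32_t lo, uint32_t hi)
{
    return ((uint64_t) hi << 32) | lo;
}
```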
diff --git a/icd/intel/cmd_pipeline.c b/icd/intel/cmd_pipeline.c
index 11ab0a7..dbac11c 100644
--- a/icd/intel/cmd_pipeline.c
+++ b/icd/intel/cmd_pipeline.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -218,13 +218,13 @@
         return false;
 
     switch (cmd->bind.index.type) {
-    case XGL_INDEX_8:
+    case VK_INDEX_8:
         supported = (p->primitive_restart_index != 0xffu);
         break;
-    case XGL_INDEX_16:
+    case VK_INDEX_16:
         supported = (p->primitive_restart_index != 0xffffu);
         break;
-    case XGL_INDEX_32:
+    case VK_INDEX_32:
         supported = (p->primitive_restart_index != 0xffffffffu);
         break;
     default:
@@ -237,8 +237,8 @@
 
 static void gen6_3DSTATE_INDEX_BUFFER(struct intel_cmd *cmd,
                                       const struct intel_buf *buf,
-                                      XGL_GPU_SIZE offset,
-                                      XGL_INDEX_TYPE type,
+                                      VK_GPU_SIZE offset,
+                                      VK_INDEX_TYPE type,
                                       bool enable_cut_index)
 {
     const uint8_t cmd_len = 3;
@@ -257,26 +257,26 @@
         dw0 |= GEN6_IB_DW0_CUT_INDEX_ENABLE;
 
     switch (type) {
-    case XGL_INDEX_8:
+    case VK_INDEX_8:
         dw0 |= GEN6_IB_DW0_FORMAT_BYTE;
         offset_align = 1;
         break;
-    case XGL_INDEX_16:
+    case VK_INDEX_16:
         dw0 |= GEN6_IB_DW0_FORMAT_WORD;
         offset_align = 2;
         break;
-    case XGL_INDEX_32:
+    case VK_INDEX_32:
         dw0 |= GEN6_IB_DW0_FORMAT_DWORD;
         offset_align = 4;
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_INVALID_VALUE);
+        cmd_fail(cmd, VK_ERROR_INVALID_VALUE);
         return;
         break;
     }
 
     if (offset % offset_align) {
-        cmd_fail(cmd, XGL_ERROR_INVALID_VALUE);
+        cmd_fail(cmd, VK_ERROR_INVALID_VALUE);
         return;
     }
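
The two switches above encode the same per-type facts: an index type's byte size doubles as the required buffer-offset alignment, and its all-ones value is the cut (primitive-restart) index that gen6_can_primitive_restart tests the pipeline's restart index against. Condensed into one table (our naming):

```
#include <stdint.h>

struct index_type_info {
    uint32_t size;       /* bytes, and required offset alignment */
    uint32_t cut_index;  /* all-ones restart value for this width */
};

static const struct index_type_info index_info[] = {
    { 1, 0xffu },        /* VK_INDEX_8  */
    { 2, 0xffffu },      /* VK_INDEX_16 */
    { 4, 0xffffffffu },  /* VK_INDEX_32 */
};

static int offset_is_aligned(uint64_t offset, const struct index_type_info *t)
{
    return offset % t->size == 0;
}
```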
 
@@ -391,11 +391,11 @@
         int format;
 
         switch (pipeline->db_format) {
-        case XGL_FMT_D16_UNORM:
+        case VK_FMT_D16_UNORM:
             format = GEN6_ZFORMAT_D16_UNORM;
             break;
-        case XGL_FMT_D32_SFLOAT:
-        case XGL_FMT_D32_SFLOAT_S8_UINT:
+        case VK_FMT_D32_SFLOAT:
+        case VK_FMT_D32_SFLOAT_S8_UINT:
             format = GEN6_ZFORMAT_D32_FLOAT;
             break;
         default:
@@ -1357,7 +1357,7 @@
 
 void cmd_batch_depth_count(struct intel_cmd *cmd,
                            struct intel_bo *bo,
-                           XGL_GPU_SIZE offset)
+                           VK_GPU_SIZE offset)
 {
     cmd_wa_gen6_pre_depth_stall_write(cmd);
 
@@ -1369,7 +1369,7 @@
 
 void cmd_batch_timestamp(struct intel_cmd *cmd,
                          struct intel_bo *bo,
-                         XGL_GPU_SIZE offset)
+                         VK_GPU_SIZE offset)
 {
     /* need any WA or stall? */
     gen6_PIPE_CONTROL(cmd, GEN6_PIPE_CONTROL_WRITE_TIMESTAMP, bo, offset, 0);
@@ -1378,7 +1378,7 @@
 void cmd_batch_immediate(struct intel_cmd *cmd,
                          uint32_t pipe_control_flags,
                          struct intel_bo *bo,
-                         XGL_GPU_SIZE offset,
+                         VK_GPU_SIZE offset,
                          uint64_t val)
 {
     /* need any WA or stall? */
@@ -1638,7 +1638,7 @@
 
 static uint32_t emit_binding_table(struct intel_cmd *cmd,
                                    const struct intel_pipeline_rmap *rmap,
-                                   const XGL_PIPELINE_SHADER_STAGE stage)
+                                   const VK_PIPELINE_SHADER_STAGE stage)
 {
     const struct intel_desc_region *region = cmd->dev->desc_region;
     const struct intel_cmd_dset_data *data = &cmd->bind.dset.graphics_data;
@@ -1775,15 +1775,15 @@
         }
 
         switch (pipeline->vb[i].stepRate) {
-        case XGL_VERTEX_INPUT_STEP_RATE_VERTEX:
+        case VK_VERTEX_INPUT_STEP_RATE_VERTEX:
             dw[0] |= GEN6_VB_DW0_ACCESS_VERTEXDATA;
             dw[3] = 0;
             break;
-        case XGL_VERTEX_INPUT_STEP_RATE_INSTANCE:
+        case VK_VERTEX_INPUT_STEP_RATE_INSTANCE:
             dw[0] |= GEN6_VB_DW0_ACCESS_INSTANCEDATA;
             dw[3] = 1;
             break;
-        case XGL_VERTEX_INPUT_STEP_RATE_DRAW:
+        case VK_VERTEX_INPUT_STEP_RATE_DRAW:
             dw[0] |= GEN6_VB_DW0_ACCESS_INSTANCEDATA;
             dw[3] = 0;
             break;
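
The step-rate cases above map to the hardware's two access modes plus an instance-data step rate in dw[3]: per-vertex data advances with the vertex index, per-instance data advances every dw[3] instances, and a step rate of 0 never advances within the draw (our reading), which is how VK_VERTEX_INPUT_STEP_RATE_DRAW supplies one element for the whole draw. A simplified sketch of which element a fetch sees (names are ours):

```
#include <stdint.h>

/* Element fetched for (vertex v, instance i) under each step rate. */
static uint32_t fetch_element(int per_instance, uint32_t step,
                              uint32_t v, uint32_t i)
{
    if (!per_instance)
        return v;                   /* STEP_RATE_VERTEX   */
    return step ? i / step : 0;     /* INSTANCE: step 1; DRAW: step 0 */
}
```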
@@ -1796,7 +1796,7 @@
 
         if (cmd->bind.vertex.buf[i]) {
             const struct intel_buf *buf = cmd->bind.vertex.buf[i];
-            const XGL_GPU_SIZE offset = cmd->bind.vertex.offset[i];
+            const VK_GPU_SIZE offset = cmd->bind.vertex.offset[i];
 
             cmd_reserve_reloc(cmd, 2);
             cmd_batch_reloc(cmd, pos + 1, buf->obj.mem->bo, offset, 0);
@@ -1875,19 +1875,19 @@
 
     binding_tables[0] = emit_binding_table(cmd,
             cmd->bind.pipeline.graphics->vs.rmap,
-            XGL_SHADER_STAGE_VERTEX);
+            VK_SHADER_STAGE_VERTEX);
     binding_tables[1] = emit_binding_table(cmd,
             cmd->bind.pipeline.graphics->tcs.rmap,
-            XGL_SHADER_STAGE_TESS_CONTROL);
+            VK_SHADER_STAGE_TESS_CONTROL);
     binding_tables[2] = emit_binding_table(cmd,
             cmd->bind.pipeline.graphics->tes.rmap,
-            XGL_SHADER_STAGE_TESS_EVALUATION);
+            VK_SHADER_STAGE_TESS_EVALUATION);
     binding_tables[3] = emit_binding_table(cmd,
             cmd->bind.pipeline.graphics->gs.rmap,
-            XGL_SHADER_STAGE_GEOMETRY);
+            VK_SHADER_STAGE_GEOMETRY);
     binding_tables[4] = emit_binding_table(cmd,
             cmd->bind.pipeline.graphics->fs.rmap,
-            XGL_SHADER_STAGE_FRAGMENT);
+            VK_SHADER_STAGE_FRAGMENT);
 
     samplers[0] = emit_samplers(cmd, cmd->bind.pipeline.graphics->vs.rmap);
     samplers[1] = emit_samplers(cmd, cmd->bind.pipeline.graphics->tcs.rmap);
@@ -1946,7 +1946,7 @@
         return;
 
     if (fb->sample_count != cmd->bind.pipeline.graphics->sample_count)
-        cmd->result = XGL_ERROR_UNKNOWN;
+        cmd->result = VK_ERROR_UNKNOWN;
 
     cmd_wa_gen6_pre_multisample_depth_flush(cmd);
     gen6_3DSTATE_MULTISAMPLE(cmd, fb->sample_count);
@@ -2010,7 +2010,7 @@
         void *entries;
 
         entries = intel_alloc(cmd, sizeof(cache->entries[0]) * count, 0,
-                XGL_SYSTEM_ALLOC_INTERNAL);
+                VK_SYSTEM_ALLOC_INTERNAL);
         if (entries) {
             if (cache->entries) {
                 memcpy(entries, cache->entries,
@@ -2129,7 +2129,7 @@
 
     CMD_ASSERT(cmd, 6, 7.5);
 
-    if (meta->ds.aspect == XGL_IMAGE_ASPECT_DEPTH) {
+    if (meta->ds.aspect == VK_IMAGE_ASPECT_DEPTH) {
         dw[0] = 0;
         dw[1] = 0;
 
@@ -2141,7 +2141,7 @@
             dw[2] = GEN6_COMPAREFUNCTION_ALWAYS << 27 |
                     GEN6_ZS_DW2_DEPTH_WRITE_ENABLE;
         }
-    } else if (meta->ds.aspect == XGL_IMAGE_ASPECT_STENCIL) {
+    } else if (meta->ds.aspect == VK_IMAGE_ASPECT_STENCIL) {
         dw[0] = GEN6_ZS_DW0_STENCIL_TEST_ENABLE |
                 (GEN6_COMPAREFUNCTION_ALWAYS) << 28 |
                 (GEN6_STENCILOP_KEEP) << 25 |
@@ -2186,7 +2186,7 @@
     }
 
     if (meta->mode != INTEL_CMD_META_VS_POINTS) {
-        if (meta->ds.aspect != XGL_IMAGE_ASPECT_COLOR) {
+        if (meta->ds.aspect != VK_IMAGE_ASPECT_COLOR) {
             const uint32_t blend_color[4] = { 0, 0, 0, 0 };
             uint32_t stencil_ref = (meta->ds.stencil_ref & 0xff) << 24 |
                                    (meta->ds.stencil_ref & 0xff) << 16;
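
The shift pair above duplicates the same 8-bit reference into two byte lanes of one dword, which we read as the front- and back-face stencil reference fields of the hardware state (an assumption; the meta path simply uses one value for both):

```
#include <stdint.h>

/* Duplicate an 8-bit stencil reference into bits 31:24 and 23:16. */
static uint32_t pack_stencil_ref(uint32_t ref)
{
    return (ref & 0xff) << 24 | (ref & 0xff) << 16;
}
```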
@@ -3044,9 +3044,9 @@
 
         data->set_offsets = intel_alloc(cmd,
                 sizeof(data->set_offsets[0]) * chain->layout_count,
-                sizeof(data->set_offsets[0]), XGL_SYSTEM_ALLOC_INTERNAL);
+                sizeof(data->set_offsets[0]), VK_SYSTEM_ALLOC_INTERNAL);
         if (!data->set_offsets) {
-            cmd_fail(cmd, XGL_ERROR_OUT_OF_MEMORY);
+            cmd_fail(cmd, VK_ERROR_OUT_OF_MEMORY);
             data->set_offset_count = 0;
             return false;
         }
@@ -3060,9 +3060,9 @@
 
         data->dynamic_offsets = intel_alloc(cmd,
                 sizeof(data->dynamic_offsets[0]) * chain->total_dynamic_desc_count,
-                sizeof(data->dynamic_offsets[0]), XGL_SYSTEM_ALLOC_INTERNAL);
+                sizeof(data->dynamic_offsets[0]), VK_SYSTEM_ALLOC_INTERNAL);
         if (!data->dynamic_offsets) {
-            cmd_fail(cmd, XGL_ERROR_OUT_OF_MEMORY);
+            cmd_fail(cmd, VK_ERROR_OUT_OF_MEMORY);
             data->dynamic_offset_count = 0;
             return false;
         }
@@ -3097,10 +3097,10 @@
 
 static void cmd_bind_vertex_data(struct intel_cmd *cmd,
                                  const struct intel_buf *buf,
-                                 XGL_GPU_SIZE offset, uint32_t binding)
+                                 VK_GPU_SIZE offset, uint32_t binding)
 {
     if (binding >= ARRAY_SIZE(cmd->bind.vertex.buf)) {
-        cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+        cmd_fail(cmd, VK_ERROR_UNKNOWN);
         return;
     }
 
@@ -3110,7 +3110,7 @@
 
 static void cmd_bind_index_data(struct intel_cmd *cmd,
                                 const struct intel_buf *buf,
-                                XGL_GPU_SIZE offset, XGL_INDEX_TYPE type)
+                                VK_GPU_SIZE offset, VK_INDEX_TYPE type)
 {
     cmd->bind.index.buf = buf;
     cmd->bind.index.offset = offset;
@@ -3225,7 +3225,7 @@
 
     if (indexed) {
         if (p->primitive_restart && !gen6_can_primitive_restart(cmd))
-            cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+            cmd_fail(cmd, VK_ERROR_UNKNOWN);
 
         if (cmd_gen(cmd) >= INTEL_GEN(7.5)) {
             gen75_3DSTATE_VF(cmd, p->primitive_restart,
@@ -3321,63 +3321,63 @@
         cmd_batch_flush_all(cmd);
 }
 
-ICD_EXPORT void XGLAPI xglCmdBindPipeline(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_PIPELINE_BIND_POINT                     pipelineBindPoint,
-    XGL_PIPELINE                                pipeline)
+ICD_EXPORT void VKAPI vkCmdBindPipeline(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_PIPELINE_BIND_POINT                     pipelineBindPoint,
+    VK_PIPELINE                                pipeline)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
     switch (pipelineBindPoint) {
-    case XGL_PIPELINE_BIND_POINT_COMPUTE:
+    case VK_PIPELINE_BIND_POINT_COMPUTE:
         cmd_bind_compute_pipeline(cmd, intel_pipeline(pipeline));
         break;
-    case XGL_PIPELINE_BIND_POINT_GRAPHICS:
+    case VK_PIPELINE_BIND_POINT_GRAPHICS:
         cmd_bind_graphics_pipeline(cmd, intel_pipeline(pipeline));
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_INVALID_VALUE);
+        cmd_fail(cmd, VK_ERROR_INVALID_VALUE);
         break;
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdBindDynamicStateObject(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_STATE_BIND_POINT                        stateBindPoint,
-    XGL_DYNAMIC_STATE_OBJECT                    state)
+ICD_EXPORT void VKAPI vkCmdBindDynamicStateObject(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_STATE_BIND_POINT                        stateBindPoint,
+    VK_DYNAMIC_STATE_OBJECT                    state)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
     switch (stateBindPoint) {
-    case XGL_STATE_BIND_VIEWPORT:
+    case VK_STATE_BIND_VIEWPORT:
         cmd_bind_viewport_state(cmd,
-                intel_dynamic_vp((XGL_DYNAMIC_VP_STATE_OBJECT) state));
+                intel_dynamic_vp((VK_DYNAMIC_VP_STATE_OBJECT) state));
         break;
-    case XGL_STATE_BIND_RASTER:
+    case VK_STATE_BIND_RASTER:
         cmd_bind_raster_state(cmd,
-                intel_dynamic_rs((XGL_DYNAMIC_RS_STATE_OBJECT) state));
+                intel_dynamic_rs((VK_DYNAMIC_RS_STATE_OBJECT) state));
         break;
-    case XGL_STATE_BIND_DEPTH_STENCIL:
+    case VK_STATE_BIND_DEPTH_STENCIL:
         cmd_bind_ds_state(cmd,
-                intel_dynamic_ds((XGL_DYNAMIC_DS_STATE_OBJECT) state));
+                intel_dynamic_ds((VK_DYNAMIC_DS_STATE_OBJECT) state));
         break;
-    case XGL_STATE_BIND_COLOR_BLEND:
+    case VK_STATE_BIND_COLOR_BLEND:
         cmd_bind_blend_state(cmd,
-                intel_dynamic_cb((XGL_DYNAMIC_CB_STATE_OBJECT) state));
+                intel_dynamic_cb((VK_DYNAMIC_CB_STATE_OBJECT) state));
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_INVALID_VALUE);
+        cmd_fail(cmd, VK_ERROR_INVALID_VALUE);
         break;
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdBindDescriptorSets(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_PIPELINE_BIND_POINT                     pipelineBindPoint,
-    XGL_DESCRIPTOR_SET_LAYOUT_CHAIN             layoutChain,
+ICD_EXPORT void VKAPI vkCmdBindDescriptorSets(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_PIPELINE_BIND_POINT                     pipelineBindPoint,
+    VK_DESCRIPTOR_SET_LAYOUT_CHAIN             layoutChain,
     uint32_t                                    layoutChainSlot,
     uint32_t                                    count,
-    const XGL_DESCRIPTOR_SET*                   pDescriptorSets,
+    const VK_DESCRIPTOR_SET*                   pDescriptorSets,
     const uint32_t*                             pUserData)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
@@ -3387,16 +3387,16 @@
     uint32_t i;
 
     switch (pipelineBindPoint) {
-    case XGL_PIPELINE_BIND_POINT_COMPUTE:
+    case VK_PIPELINE_BIND_POINT_COMPUTE:
         cmd->bind.dset.compute = chain;
         data = &cmd->bind.dset.compute_data;
         break;
-    case XGL_PIPELINE_BIND_POINT_GRAPHICS:
+    case VK_PIPELINE_BIND_POINT_GRAPHICS:
         cmd->bind.dset.graphics = chain;
         data = &cmd->bind.dset.graphics_data;
         break;
     default:
-        cmd_fail(cmd, XGL_ERROR_INVALID_VALUE);
+        cmd_fail(cmd, VK_ERROR_INVALID_VALUE);
         return;
         break;
     }
@@ -3413,10 +3413,10 @@
     }
 }
 
-ICD_EXPORT void XGLAPI xglCmdBindVertexBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  buffer,
-    XGL_GPU_SIZE                                offset,
+ICD_EXPORT void VKAPI vkCmdBindVertexBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  buffer,
+    VK_GPU_SIZE                                offset,
     uint32_t                                    binding)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
@@ -3425,11 +3425,11 @@
     cmd_bind_vertex_data(cmd, buf, offset, binding);
 }
 
-ICD_EXPORT void XGLAPI xglCmdBindIndexBuffer(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  buffer,
-    XGL_GPU_SIZE                                offset,
-    XGL_INDEX_TYPE                              indexType)
+ICD_EXPORT void VKAPI vkCmdBindIndexBuffer(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  buffer,
+    VK_GPU_SIZE                                offset,
+    VK_INDEX_TYPE                              indexType)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
     struct intel_buf *buf = intel_buf(buffer);
@@ -3437,8 +3437,8 @@
     cmd_bind_index_data(cmd, buf, offset, indexType);
 }
 
-ICD_EXPORT void XGLAPI xglCmdDraw(
-    XGL_CMD_BUFFER                              cmdBuffer,
+ICD_EXPORT void VKAPI vkCmdDraw(
+    VK_CMD_BUFFER                              cmdBuffer,
     uint32_t                                    firstVertex,
     uint32_t                                    vertexCount,
     uint32_t                                    firstInstance,
@@ -3450,8 +3450,8 @@
             firstInstance, instanceCount, false, 0);
 }
 
-ICD_EXPORT void XGLAPI xglCmdDrawIndexed(
-    XGL_CMD_BUFFER                              cmdBuffer,
+ICD_EXPORT void VKAPI vkCmdDrawIndexed(
+    VK_CMD_BUFFER                              cmdBuffer,
     uint32_t                                    firstIndex,
     uint32_t                                    indexCount,
     int32_t                                     vertexOffset,
@@ -3464,63 +3464,63 @@
             firstInstance, instanceCount, true, vertexOffset);
 }
 
-ICD_EXPORT void XGLAPI xglCmdDrawIndirect(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  buffer,
-    XGL_GPU_SIZE                                offset,
+ICD_EXPORT void VKAPI vkCmdDrawIndirect(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  buffer,
+    VK_GPU_SIZE                                offset,
     uint32_t                                    count,
     uint32_t                                    stride)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
-    cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+    cmd_fail(cmd, VK_ERROR_UNKNOWN);
 }
 
-ICD_EXPORT void XGLAPI xglCmdDrawIndexedIndirect(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  buffer,
-    XGL_GPU_SIZE                                offset,
+ICD_EXPORT void VKAPI vkCmdDrawIndexedIndirect(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  buffer,
+    VK_GPU_SIZE                                offset,
     uint32_t                                    count,
     uint32_t                                    stride)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
-    cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+    cmd_fail(cmd, VK_ERROR_UNKNOWN);
 }
 
-ICD_EXPORT void XGLAPI xglCmdDispatch(
-    XGL_CMD_BUFFER                              cmdBuffer,
+ICD_EXPORT void VKAPI vkCmdDispatch(
+    VK_CMD_BUFFER                              cmdBuffer,
     uint32_t                                    x,
     uint32_t                                    y,
     uint32_t                                    z)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
-    cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+    cmd_fail(cmd, VK_ERROR_UNKNOWN);
 }
 
-ICD_EXPORT void XGLAPI xglCmdDispatchIndirect(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_BUFFER                                  buffer,
-    XGL_GPU_SIZE                                offset)
+ICD_EXPORT void VKAPI vkCmdDispatchIndirect(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_BUFFER                                  buffer,
+    VK_GPU_SIZE                                offset)
 {
     struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
-    cmd_fail(cmd, XGL_ERROR_UNKNOWN);
+    cmd_fail(cmd, VK_ERROR_UNKNOWN);
 }
 
-ICD_EXPORT void XGLAPI xglCmdBeginRenderPass(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    const XGL_RENDER_PASS_BEGIN*                pRenderPassBegin)
+ICD_EXPORT void VKAPI vkCmdBeginRenderPass(
+    VK_CMD_BUFFER                              cmdBuffer,
+    const VK_RENDER_PASS_BEGIN*                pRenderPassBegin)
 {
    struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
    cmd_begin_render_pass(cmd, (struct intel_render_pass *) pRenderPassBegin->renderPass, pRenderPassBegin->framebuffer);
 }
 
-ICD_EXPORT void XGLAPI xglCmdEndRenderPass(
-    XGL_CMD_BUFFER                              cmdBuffer,
-    XGL_RENDER_PASS                             renderPass)
+ICD_EXPORT void VKAPI vkCmdEndRenderPass(
+    VK_CMD_BUFFER                              cmdBuffer,
+    VK_RENDER_PASS                             renderPass)
 {
    struct intel_cmd *cmd = intel_cmd(cmdBuffer);
 
diff --git a/icd/intel/cmd_priv.h b/icd/intel/cmd_priv.h
index ff930a9..b5d831d 100644
--- a/icd/intel/cmd_priv.h
+++ b/icd/intel/cmd_priv.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -125,7 +125,7 @@
     struct {
         struct intel_ds_view *view;
         uint32_t stencil_ref;
-        XGL_IMAGE_ASPECT aspect;
+        VK_IMAGE_ASPECT aspect;
 
         enum intel_cmd_meta_ds_op op;
         bool optimal;
@@ -142,10 +142,10 @@
     return intel_gpu_gen(cmd->dev->gpu);
 }
 
-static inline void cmd_fail(struct intel_cmd *cmd, XGL_RESULT result)
+static inline void cmd_fail(struct intel_cmd *cmd, VK_RESULT result)
 {
-    intel_dev_log(cmd->dev, XGL_DBG_MSG_ERROR,
-            XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
+    intel_dev_log(cmd->dev, VK_DBG_MSG_ERROR,
+            VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
             "command building error");
 
     cmd->result = result;
@@ -157,7 +157,7 @@
     /* fail silently */
     if (cmd->reloc_used + reloc_len > cmd->reloc_count) {
         cmd->reloc_used = 0;
-        cmd_fail(cmd, XGL_ERROR_TOO_MANY_MEMORY_REFERENCES);
+        cmd_fail(cmd, VK_ERROR_TOO_MANY_MEMORY_REFERENCES);
     }
     assert(cmd->reloc_used + reloc_len <= cmd->reloc_count);
 }
@@ -506,16 +506,16 @@
 
 void cmd_batch_depth_count(struct intel_cmd *cmd,
                            struct intel_bo *bo,
-                           XGL_GPU_SIZE offset);
+                           VK_GPU_SIZE offset);
 
 void cmd_batch_timestamp(struct intel_cmd *cmd,
                          struct intel_bo *bo,
-                         XGL_GPU_SIZE offset);
+                         VK_GPU_SIZE offset);
 
 void cmd_batch_immediate(struct intel_cmd *cmd,
                          uint32_t pipe_control_flags,
                          struct intel_bo *bo,
-                         XGL_GPU_SIZE offset,
+                         VK_GPU_SIZE offset,
                          uint64_t val);
 
 void cmd_draw_meta(struct intel_cmd *cmd, const struct intel_cmd_meta *meta);
@@ -523,6 +523,6 @@
 void cmd_meta_ds_op(struct intel_cmd *cmd,
                     enum intel_cmd_meta_ds_op op,
                     struct intel_img *img,
-                    const XGL_IMAGE_SUBRESOURCE_RANGE *range);
+                    const VK_IMAGE_SUBRESOURCE_RANGE *range);
 
 #endif /* CMD_PRIV_H */
diff --git a/icd/intel/compiler/README.md b/icd/intel/compiler/README.md
index a96134a..d4cd9e6 100644
--- a/icd/intel/compiler/README.md
+++ b/icd/intel/compiler/README.md
@@ -8,25 +8,25 @@
 - [GlassyMesa's GLSLIR and supporting infrastructure](shader)
 - [GlassyMesa's DRI i965 backend](pipeline)
 
-For xglCreateShader, we primarily used the existing standalone device independent front end which can consume GLSL or BIL, and results in a separately linked shader object.
+For vkCreateShader, we primarily used the existing standalone, device-independent front end, which can consume GLSL or BIL and produces a separately linked shader object.
 
-For xglCreateGraphicsPipeline, we pulled over only the files needed to lower the shader object to ISA and supporting metadata.  Much of the i965 DRI driver was removed or commented out for future use, and is still being actively bootstrapped.
+For vkCreateGraphicsPipeline, we pulled over only the files needed to lower the shader object to ISA and its supporting metadata.  Much of the i965 DRI driver was removed or commented out for future use and is still being actively bootstrapped.
 
 Currently only Vertex and Fragment shaders are supported.  Any shader that fits within the IO parameters you see tested in compiler_render_tests.cpp should work.  Buffers with bindings, samplers with bindings, and interstage IO with locations are all working.  Vertex input locations work if they are sequential and start from 0.  Fragment output locations only work for location 0.
 
 We recommend using only buffers with bindings for uniforms; do not use global, non-block uniforms.
 
-Design decisions we made to get this stack working with current specified XGL and BIL.  We know these are active areas of discussion, and we'll update when decisions are made:
+Here are the design decisions we made to get this stack working with the currently specified VK and BIL.  We know these are active areas of discussion, and we'll update when decisions are made:
 - Samplers:
-  - GLSL sampler bindings equate to a sampler/texture pair of the same number, as set up by the XGL application.  i.e. the following sampler:
+  - GLSL sampler bindings equate to a sampler/texture pair of the same number, as set up by the VK application, e.g. the following sampler:
 ```
     layout (binding = 2) uniform sampler2D surface;
 ```
-will read from XGL_SLOT_SHADER_SAMPLER entity 2 and XGL_SLOT_SHADER_RESOURCE entity 2.
+will read from VK_SLOT_SHADER_SAMPLER entity 2 and VK_SLOT_SHADER_RESOURCE entity 2.
 
 - Buffers:
   - GLSL buffer bindings equate to the buffer bound at the same slot, e.g. the following uniform buffer:
 ```
     layout (std140, binding = 2) uniform foo { vec4 bar; } myBuffer;
 ```
-will be read from XGL_SHADER_RESOURCE entity 2.
+will be read from VK_SHADER_RESOURCE entity 2.
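
For the uniform block in that last example, std140 lays the single vec4 out in exactly 16 bytes, so a C-side staging struct for the buffer bound at slot 2 could look like this (hypothetical application code, not part of the driver):

```
/* Matches "layout (std140, binding = 2) uniform foo { vec4 bar; }". */
struct foo_block {
    float bar[4];   /* vec4: 16 bytes, 16-byte aligned in std140 */
};
_Static_assert(sizeof(struct foo_block) == 16, "std140 vec4 is 16 bytes");
```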
diff --git a/icd/intel/compiler/mesa-utils/src/mesa/main/mtypes.h b/icd/intel/compiler/mesa-utils/src/mesa/main/mtypes.h
index 71f98be..fb3a12e 100644
--- a/icd/intel/compiler/mesa-utils/src/mesa/main/mtypes.h
+++ b/icd/intel/compiler/mesa-utils/src/mesa/main/mtypes.h
@@ -796,7 +796,7 @@
 {
    GLuint Id;
    GLubyte *String;  /**< Null-terminated program text */
-   // LunarG: Remove - XGL does not use reference counts
+   // LunarG: Remove - VK does not use reference counts
    // GLint RefCount;
    GLenum Target;    /**< GL_VERTEX/FRAGMENT_PROGRAM_ARB, GL_GEOMETRY_PROGRAM_NV */
    GLenum Format;    /**< String encoding format */
@@ -1090,7 +1090,7 @@
    gl_shader_stage Stage;
    GLuint Name;  /**< AKA the handle */
    GLchar *Label;   /**< GL_KHR_debug */
-   // LunarG: Remove - XGL does not use reference counts
+   // LunarG: Remove - VK does not use reference counts
    // GLint RefCount;
    GLboolean DeletePending;
    GLboolean CompileStatus;
@@ -1326,7 +1326,7 @@
    GLenum Type;  /**< Always GL_SHADER_PROGRAM (internal token) */
    GLuint Name;  /**< aka handle or ID */
    GLchar *Label;   /**< GL_KHR_debug */
-   // LunarG: Remove - XGL does not use reference counts
+   // LunarG: Remove - VK does not use reference counts
    // GLint RefCount;
    GLboolean DeletePending;
 
@@ -2259,7 +2259,7 @@
    API_OPENGLES,
    API_OPENGLES2,
    API_OPENGL_CORE,
-   API_XGL,
+   API_VK,
    API_OPENGL_LAST = API_OPENGL_CORE
 } gl_api;
 
diff --git a/icd/intel/compiler/mesa-utils/src/mesa/main/version.c b/icd/intel/compiler/mesa-utils/src/mesa/main/version.c
index e27ce26..520c7b4 100644
--- a/icd/intel/compiler/mesa-utils/src/mesa/main/version.c
+++ b/icd/intel/compiler/mesa-utils/src/mesa/main/version.c
@@ -408,7 +408,7 @@
    case API_OPENGLES2:
       compute_version_es2(ctx);
       break;
-   case API_XGL:
+   case API_VK:
        break;
    }
 
diff --git a/icd/intel/compiler/mesa-utils/src/mesa/program/program.c b/icd/intel/compiler/mesa-utils/src/mesa/program/program.c
index 2915b36..b9360ac 100644
--- a/icd/intel/compiler/mesa-utils/src/mesa/program/program.c
+++ b/icd/intel/compiler/mesa-utils/src/mesa/program/program.c
@@ -239,7 +239,7 @@
       memset(prog, 0, sizeof(*prog));
       prog->Id = id;
       prog->Target = target;
-      // LunarG: XGL does not use reference counts
+      // LunarG: VK does not use reference counts
       // prog->RefCount = 1;
       prog->Format = GL_PROGRAM_FORMAT_ASCII_ARB;
 
@@ -365,7 +365,7 @@
 {
    (void) ctx;
    ASSERT(prog);
-   // LunarG: XGL does not use reference counts
+   // LunarG: VK does not use reference counts
    //ASSERT(prog->RefCount==0);
 
    if (prog == &_mesa_DummyProgram)
@@ -410,7 +410,7 @@
                          struct gl_program **ptr,
                          struct gl_program *prog)
 {
-// LunarG: XGL does not use reference counts
+// LunarG: VK does not use reference counts
 #if 0
 #ifndef NDEBUG
    assert(ptr);
@@ -485,7 +485,7 @@
       return NULL;
 
    assert(clone->Target == prog->Target);
-   // LunarG: XGL does not use reference counts
+   // LunarG: VK does not use reference counts
    // assert(clone->RefCount == 1);
 
    clone->String = (GLubyte *) _mesa_strdup((char *) prog->String);
diff --git a/icd/intel/compiler/pipeline/brw_context.c b/icd/intel/compiler/pipeline/brw_context.c
index b26b555..5c77706 100644
--- a/icd/intel/compiler/pipeline/brw_context.c
+++ b/icd/intel/compiler/pipeline/brw_context.c
@@ -499,7 +499,7 @@
    ctx->ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;
 
    /* ARB_viewport_array */
-   if (brw->gen >= 7 && (ctx->API == API_OPENGL_CORE || ctx->API == API_XGL)) {
+   if (brw->gen >= 7 && (ctx->API == API_OPENGL_CORE || ctx->API == API_VK)) {
       ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
       ctx->Const.ViewportSubpixelBits = 0;
 
diff --git a/icd/intel/compiler/pipeline/brw_shader.cpp b/icd/intel/compiler/pipeline/brw_shader.cpp
index 7faea89..5db6129 100644
--- a/icd/intel/compiler/pipeline/brw_shader.cpp
+++ b/icd/intel/compiler/pipeline/brw_shader.cpp
@@ -139,7 +139,7 @@
       //               bringing in shaderobj.c
       //_mesa_init_shader_program(ctx, &prog->base);
       prog->base.Type = GL_SHADER_PROGRAM_MESA;
-      // LunarG: Remove - XGL does not use reference counts
+      // LunarG: Remove - VK does not use reference counts
       // prog->base.RefCount = 1;
 
       prog->base.AttributeBindings = new string_to_uint_map;
diff --git a/icd/intel/compiler/pipeline/brw_vs.c b/icd/intel/compiler/pipeline/brw_vs.c
index 2516d9a..e86ac85 100644
--- a/icd/intel/compiler/pipeline/brw_vs.c
+++ b/icd/intel/compiler/pipeline/brw_vs.c
@@ -345,7 +345,7 @@
 
    brw_vec4_setup_prog_key_for_precompile(ctx, &key.base, bvp->id, &vp->Base);
 
-   // In XGL, user clipping is triggered solely from the shader.
+   // In VK, user clipping is triggered solely from the shader.
    key.base.userclip_active = vp->Base.UsesClipDistanceOut;
 
    struct brw_vs_compile c;
diff --git a/icd/intel/compiler/pipeline/pipeline_compiler_interface.cpp b/icd/intel/compiler/pipeline/pipeline_compiler_interface.cpp
index f8201ac..98804b9 100644
--- a/icd/intel/compiler/pipeline/pipeline_compiler_interface.cpp
+++ b/icd/intel/compiler/pipeline/pipeline_compiler_interface.cpp
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -296,7 +296,7 @@
     uint32_t surface_count, i;
 
     rmap = (struct intel_pipeline_rmap *)
-        intel_alloc(gpu, sizeof(*rmap), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+        intel_alloc(gpu, sizeof(*rmap), 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!rmap)
         return NULL;
 
@@ -313,7 +313,7 @@
 
     rmap->slots = (struct intel_pipeline_rmap_slot *)
         intel_alloc(gpu, sizeof(rmap->slots[0]) * rmap->slot_count,
-            0, XGL_SYSTEM_ALLOC_INTERNAL);
+            0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!rmap->slots) {
         intel_free(gpu, rmap);
         return NULL;
@@ -401,15 +401,15 @@
 }
 
 // invoke backend compiler to generate ISA and supporting data structures
-XGL_RESULT intel_pipeline_shader_compile(struct intel_pipeline_shader *pipe_shader,
+VK_RESULT intel_pipeline_shader_compile(struct intel_pipeline_shader *pipe_shader,
                                          const struct intel_gpu *gpu,
                                          const struct intel_desc_layout_chain *chain,
-                                         const XGL_PIPELINE_SHADER *info)
+                                         const VK_PIPELINE_SHADER *info)
 {
     const struct intel_ir *ir = intel_shader(info->shader)->ir;
     /* XXX how about constness? */
     struct gl_shader_program *sh_prog = (struct gl_shader_program *) ir;
-    XGL_RESULT status = XGL_SUCCESS;
+    VK_RESULT status = VK_SUCCESS;
     struct brw_binding_table bt;
 
     struct brw_context *brw = intel_create_brw_context(gpu);
@@ -427,9 +427,9 @@
         {
             pipe_shader->codeSize = get_vs_program_size(brw->shader_prog);
 
-            pipe_shader->pCode = intel_alloc(gpu, pipe_shader->codeSize, 0, XGL_SYSTEM_ALLOC_INTERNAL_SHADER);
+            pipe_shader->pCode = intel_alloc(gpu, pipe_shader->codeSize, 0, VK_SYSTEM_ALLOC_INTERNAL_SHADER);
             if (!pipe_shader->pCode) {
-                status = XGL_ERROR_OUT_OF_MEMORY;
+                status = VK_ERROR_OUT_OF_MEMORY;
                 break;
             }
 
@@ -484,12 +484,12 @@
 
             if (bt.ubo_count != sh_prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
                 // If there is no UBO data to pull from, the shader is using a default uniform, which
-                // will not work in XGL.  We need a binding slot to pull from.
-                intel_log(gpu, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
+                // will not work in VK.  We need a binding slot to pull from.
+                intel_log(gpu, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
                         "compile error: VS reads from global, non-block uniform");
 
                 assert(0);
-                status = XGL_ERROR_BAD_PIPELINE_DATA;
+                status = VK_ERROR_BAD_PIPELINE_DATA;
                 break;
             }
 
@@ -546,9 +546,9 @@
 
             pipe_shader->codeSize = get_wm_program_size(brw->shader_prog);
 
-            pipe_shader->pCode = intel_alloc(gpu, pipe_shader->codeSize, 0, XGL_SYSTEM_ALLOC_INTERNAL_SHADER);
+            pipe_shader->pCode = intel_alloc(gpu, pipe_shader->codeSize, 0, VK_SYSTEM_ALLOC_INTERNAL_SHADER);
             if (!pipe_shader->pCode) {
-                status = XGL_ERROR_OUT_OF_MEMORY;
+                status = VK_ERROR_OUT_OF_MEMORY;
                 break;
             }
 
@@ -618,12 +618,12 @@
 
             if (bt.ubo_count != sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->NumUniformBlocks) {
                 // If there is no UBO data to pull from, the shader is using a default uniform, which
-                // will not work in XGL.  We need a binding slot to pull from.
-                intel_log(gpu, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
+                // will not work in VK.  We need a binding slot to pull from.
+                intel_log(gpu, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
                         "compile error: FS reads from global, non-block uniform");
 
                 assert(0);
-                status = XGL_ERROR_BAD_PIPELINE_DATA;
+                status = VK_ERROR_BAD_PIPELINE_DATA;
                 break;
             }
 
@@ -687,18 +687,18 @@
         case GL_COMPUTE_SHADER:
         default:
             assert(0);
-            status = XGL_ERROR_BAD_PIPELINE_DATA;
+            status = VK_ERROR_BAD_PIPELINE_DATA;
         }
     } else {
         assert(0);
-        status = XGL_ERROR_BAD_PIPELINE_DATA;
+        status = VK_ERROR_BAD_PIPELINE_DATA;
     }
 
-    if (status == XGL_SUCCESS) {
+    if (status == VK_SUCCESS) {
         pipe_shader->rmap = rmap_create(gpu, chain, &bt);
         if (!pipe_shader->rmap) {
             intel_pipeline_shader_cleanup(pipe_shader, gpu);
-            status = XGL_ERROR_OUT_OF_MEMORY;
+            status = VK_ERROR_OUT_OF_MEMORY;
         }
     }
 
diff --git a/icd/intel/compiler/pipeline/pipeline_compiler_interface.h b/icd/intel/compiler/pipeline/pipeline_compiler_interface.h
index a8a1e29..aa26160 100644
--- a/icd/intel/compiler/pipeline/pipeline_compiler_interface.h
+++ b/icd/intel/compiler/pipeline/pipeline_compiler_interface.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -44,15 +44,15 @@
 struct brw_context *intel_create_brw_context(const struct intel_gpu *gpu);
 void intel_destroy_brw_context(struct brw_context *brw);
 
-XGL_RESULT intel_pipeline_shader_compile(struct intel_pipeline_shader *ips,
+VK_RESULT intel_pipeline_shader_compile(struct intel_pipeline_shader *ips,
                                          const struct intel_gpu *gpu,
                                          const struct intel_desc_layout_chain *chain,
-                                         const XGL_PIPELINE_SHADER *info);
+                                         const VK_PIPELINE_SHADER *info);
 
 void intel_pipeline_shader_cleanup(struct intel_pipeline_shader *sh,
                                    const struct intel_gpu *gpu);
 
-XGL_RESULT intel_pipeline_shader_compile_meta(struct intel_pipeline_shader *sh,
+VK_RESULT intel_pipeline_shader_compile_meta(struct intel_pipeline_shader *sh,
                                               const struct intel_gpu *gpu,
                                               enum intel_dev_meta_shader id);
 
diff --git a/icd/intel/compiler/pipeline/pipeline_compiler_interface_meta.cpp b/icd/intel/compiler/pipeline/pipeline_compiler_interface_meta.cpp
index 1feb3c6..3561e4a 100644
--- a/icd/intel/compiler/pipeline/pipeline_compiler_interface_meta.cpp
+++ b/icd/intel/compiler/pipeline/pipeline_compiler_interface_meta.cpp
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -619,7 +619,7 @@
 
     prog = get_program(&prog_size, stderr);
 
-    code = intel_alloc(gpu, prog_size, 0, XGL_SYSTEM_ALLOC_INTERNAL);
+    code = intel_alloc(gpu, prog_size, 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!code)
         return NULL;
 
@@ -695,7 +695,7 @@
 
 extern "C" {
 
-XGL_RESULT intel_pipeline_shader_compile_meta(struct intel_pipeline_shader *sh,
+VK_RESULT intel_pipeline_shader_compile_meta(struct intel_pipeline_shader *sh,
                                               const struct intel_gpu *gpu,
                                               enum intel_dev_meta_shader id)
 {
@@ -734,7 +734,7 @@
     ralloc_free(brw->shader_prog);
     ralloc_free(brw);
 
-    return (sh->pCode) ? XGL_SUCCESS : XGL_ERROR_UNKNOWN;
+    return (sh->pCode) ? VK_SUCCESS : VK_ERROR_UNKNOWN;
 }
 
 } // extern "C"
diff --git a/icd/intel/compiler/shader/compiler_interface.cpp b/icd/intel/compiler/shader/compiler_interface.cpp
index 6f100db..be72740 100644
--- a/icd/intel/compiler/shader/compiler_interface.cpp
+++ b/icd/intel/compiler/shader/compiler_interface.cpp
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -229,7 +229,7 @@
    ctx->Const.MaxVertexStreams = 1;
 
    /* GL 3.2  */
-   ctx->Const.ProfileMask = (ctx->API == API_OPENGL_CORE || ctx->API == API_XGL)
+   ctx->Const.ProfileMask = (ctx->API == API_OPENGL_CORE || ctx->API == API_VK)
                           ? GL_CONTEXT_CORE_PROFILE_BIT
                           : GL_CONTEXT_COMPATIBILITY_PROFILE_BIT;
 
@@ -285,7 +285,7 @@
 {
    memset(ctx, 0, sizeof(*ctx));
 
-   ctx->API = API_XGL;
+   ctx->API = API_VK;
 
    ctx->Extensions.dummy_false = false;
    ctx->Extensions.dummy_true = true;
@@ -381,10 +381,10 @@
         shader->Source = (const char *) code + sizeof(header);
 
         switch(header.gen_magic) {
-        case XGL_SHADER_STAGE_VERTEX:
+        case VK_SHADER_STAGE_VERTEX:
             shader->Type = GL_VERTEX_SHADER;
             break;
-        case XGL_SHADER_STAGE_FRAGMENT:
+        case VK_SHADER_STAGE_FRAGMENT:
             shader->Type = GL_FRAGMENT_SHADER;
             break;
         default:
@@ -449,7 +449,7 @@
 
     assert(shader_program->NumShaders == 1);
 
-    // for XGL, we are independently compiling and linking individual
+    // for VK, we are independently compiling and linking individual
     // shaders, which matches this frontend's concept of SSO
     shader_program->SeparateShader = true;
 
diff --git a/icd/intel/compiler/shader/compiler_interface.h b/icd/intel/compiler/shader/compiler_interface.h
index 09f5b4f..f318639 100644
--- a/icd/intel/compiler/shader/compiler_interface.h
+++ b/icd/intel/compiler/shader/compiler_interface.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
diff --git a/icd/intel/compiler/shader/glcpp/README b/icd/intel/compiler/shader/glcpp/README
index 0637935..77797dd 100644
--- a/icd/intel/compiler/shader/glcpp/README
+++ b/icd/intel/compiler/shader/glcpp/README
@@ -27,4 +27,4 @@
 -----------------
 A file that ends with a function-like macro name as the last
 non-whitespace token will result in a parse error (where it should be
-passed through as is).
\ No newline at end of file
+passed through as is).
diff --git a/icd/intel/compiler/shader/glcpp/tests/glcpp-test b/icd/intel/compiler/shader/glcpp/tests/glcpp-test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/glsl_parser_extras.cpp b/icd/intel/compiler/shader/glsl_parser_extras.cpp
index f0850af..2177dba 100644
--- a/icd/intel/compiler/shader/glsl_parser_extras.cpp
+++ b/icd/intel/compiler/shader/glsl_parser_extras.cpp
@@ -362,7 +362,7 @@
       case API_OPENGLES2:
 	 this->language_version = 100;
 	 break;
-      case API_XGL:
+      case API_VK:
          break;
       }
    }
diff --git a/icd/intel/compiler/shader/link_uniforms.cpp b/icd/intel/compiler/shader/link_uniforms.cpp
index 2c06052..eae87ab 100644
--- a/icd/intel/compiler/shader/link_uniforms.cpp
+++ b/icd/intel/compiler/shader/link_uniforms.cpp
@@ -608,7 +608,7 @@
     */
    unsigned shader_shadow_samplers;
 
-   bool isXGL;
+   bool isVK;
 };
 
 /**
diff --git a/icd/intel/compiler/shader/main.cpp b/icd/intel/compiler/shader/main.cpp
index 8076dac..9fae962 100644
--- a/icd/intel/compiler/shader/main.cpp
+++ b/icd/intel/compiler/shader/main.cpp
@@ -434,7 +434,7 @@
    if ((status == EXIT_SUCCESS) && do_link)  {
       assert(whole_program->NumShaders == 1);
 
-      // for XGL, we are independently compiling and linking individual
+      // for VK, we are independently compiling and linking individual
       // shaders, which matches this frontend's concept of SSO
       whole_program->SeparateShader = true;
 
diff --git a/icd/intel/compiler/shader/opt_dead_builtin_varyings.cpp b/icd/intel/compiler/shader/opt_dead_builtin_varyings.cpp
index 2af8c37..824261b 100644
--- a/icd/intel/compiler/shader/opt_dead_builtin_varyings.cpp
+++ b/icd/intel/compiler/shader/opt_dead_builtin_varyings.cpp
@@ -521,7 +521,7 @@
     * GLES2, because they are not available there.
     */
    if (ctx->API == API_OPENGL_CORE ||
-       ctx->API == API_XGL ||
+       ctx->API == API_VK ||
        ctx->API == API_OPENGLES2) {
       return;
    }
diff --git a/icd/intel/compiler/shader/standalone_scaffolding.cpp b/icd/intel/compiler/shader/standalone_scaffolding.cpp
index 043fb79..55788a6 100644
--- a/icd/intel/compiler/shader/standalone_scaffolding.cpp
+++ b/icd/intel/compiler/shader/standalone_scaffolding.cpp
@@ -86,7 +86,7 @@
       shader->Type = type;
       shader->Stage = _mesa_shader_enum_to_shader_stage(type);
       shader->Name = name;
-      // LunarG: XGL does not use reference counts
+      // LunarG: VK does not use reference counts
 //      shader->RefCount = 1;
    }
    return shader;
diff --git a/icd/intel/compiler/shader/tests/compare_ir b/icd/intel/compiler/shader/tests/compare_ir
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_1.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_1.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_2.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_2.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_3.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_3.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_4.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_4.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_5.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_5.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_6.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_breaks_6.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_guarded_conditional_break.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_guarded_conditional_break.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_pulled_out_jump.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_pulled_out_jump.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_1.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_1.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_2.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_2.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_3.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_3.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_4.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_4.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_main_false.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_main_false.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_main_true.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_main_true.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_sub_false.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_sub_false.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_sub_true.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_returns_sub_true.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/lower_unified_returns.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/lower_unified_returns.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/remove_continue_at_end_of_loop.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/remove_continue_at_end_of_loop.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/return_non_void_at_end_of_loop_lower_nothing.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/return_non_void_at_end_of_loop_lower_nothing.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/return_non_void_at_end_of_loop_lower_return.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/return_non_void_at_end_of_loop_lower_return.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/return_non_void_at_end_of_loop_lower_return_and_break.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/return_non_void_at_end_of_loop_lower_return_and_break.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/return_void_at_end_of_loop_lower_nothing.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/return_void_at_end_of_loop_lower_nothing.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/return_void_at_end_of_loop_lower_return.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/return_void_at_end_of_loop_lower_return.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/lower_jumps/return_void_at_end_of_loop_lower_return_and_break.opt_test b/icd/intel/compiler/shader/tests/lower_jumps/return_void_at_end_of_loop_lower_return_and_break.opt_test
old mode 100755
new mode 100644
diff --git a/icd/intel/compiler/shader/tests/optimization-test b/icd/intel/compiler/shader/tests/optimization-test
old mode 100755
new mode 100644
diff --git a/icd/intel/desc.c b/icd/intel/desc.c
index 5440133..af92236 100644
--- a/icd/intel/desc.c
+++ b/icd/intel/desc.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2015 LunarG, Inc.
  *
@@ -82,7 +82,7 @@
 
 static bool desc_iter_init_for_update(struct intel_desc_iter *iter,
                                       const struct intel_desc_set *set,
-                                      XGL_DESCRIPTOR_TYPE type,
+                                      VK_DESCRIPTOR_TYPE type,
                                       uint32_t binding_index, uint32_t array_base)
 {
     if (!intel_desc_iter_init_for_binding(iter, set->layout,
@@ -117,22 +117,22 @@
     return true;
 }
 
-XGL_RESULT intel_desc_region_create(struct intel_dev *dev,
+VK_RESULT intel_desc_region_create(struct intel_dev *dev,
                                     struct intel_desc_region **region_ret)
 {
     const uint32_t surface_count = 16384;
     const uint32_t sampler_count = 16384;
     struct intel_desc_region *region;
 
-    region = intel_alloc(dev, sizeof(*region), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+    region = intel_alloc(dev, sizeof(*region), 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!region)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     memset(region, 0, sizeof(*region));
 
     if (!desc_region_init_desc_sizes(region, dev->gpu)) {
         intel_free(dev, region);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
     intel_desc_offset_set(&region->size,
@@ -140,23 +140,23 @@
             region->sampler_desc_size * sampler_count);
 
     region->surfaces = intel_alloc(dev, region->size.surface,
-            64, XGL_SYSTEM_ALLOC_INTERNAL);
+            64, VK_SYSTEM_ALLOC_INTERNAL);
     if (!region->surfaces) {
         intel_free(dev, region);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     region->samplers = intel_alloc(dev, region->size.sampler,
-            64, XGL_SYSTEM_ALLOC_INTERNAL);
+            64, VK_SYSTEM_ALLOC_INTERNAL);
     if (!region->samplers) {
         intel_free(dev, region->surfaces);
         intel_free(dev, region);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     *region_ret = region;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_region_destroy(struct intel_dev *dev,
@@ -170,43 +170,43 @@
 /**
  * Get the size of a descriptor in the region.
  */
-static XGL_RESULT desc_region_get_desc_size(const struct intel_desc_region *region,
-                                            XGL_DESCRIPTOR_TYPE type,
+static VK_RESULT desc_region_get_desc_size(const struct intel_desc_region *region,
+                                            VK_DESCRIPTOR_TYPE type,
                                             struct intel_desc_offset *size)
 {
     uint32_t surface_size = 0, sampler_size = 0;
 
     switch (type) {
-    case XGL_DESCRIPTOR_TYPE_SAMPLER:
+    case VK_DESCRIPTOR_TYPE_SAMPLER:
         sampler_size = region->sampler_desc_size;
         break;
-    case XGL_DESCRIPTOR_TYPE_SAMPLER_TEXTURE:
+    case VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE:
         surface_size = region->surface_desc_size;
         sampler_size = region->sampler_desc_size;
         break;
-    case XGL_DESCRIPTOR_TYPE_TEXTURE:
-    case XGL_DESCRIPTOR_TYPE_TEXTURE_BUFFER:
-    case XGL_DESCRIPTOR_TYPE_IMAGE:
-    case XGL_DESCRIPTOR_TYPE_IMAGE_BUFFER:
-    case XGL_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
-    case XGL_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER:
-    case XGL_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-    case XGL_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER_DYNAMIC:
+    case VK_DESCRIPTOR_TYPE_TEXTURE:
+    case VK_DESCRIPTOR_TYPE_TEXTURE_BUFFER:
+    case VK_DESCRIPTOR_TYPE_IMAGE:
+    case VK_DESCRIPTOR_TYPE_IMAGE_BUFFER:
+    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+    case VK_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER:
+    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+    case VK_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER_DYNAMIC:
         surface_size = region->surface_desc_size;
         break;
     default:
         assert(!"unknown descriptor type");
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
         break;
     }
 
     intel_desc_offset_set(size, surface_size, sampler_size);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_desc_region_alloc(struct intel_desc_region *region,
-                                   const XGL_DESCRIPTOR_POOL_CREATE_INFO *info,
+VK_RESULT intel_desc_region_alloc(struct intel_desc_region *region,
+                                   const VK_DESCRIPTOR_POOL_CREATE_INFO *info,
                                    struct intel_desc_offset *begin,
                                    struct intel_desc_offset *end)
 {
@@ -216,12 +216,12 @@
 
     /* calculate sizes needed */
     for (i = 0; i < info->count; i++) {
-        const XGL_DESCRIPTOR_TYPE_COUNT *tc = &info->pTypeCount[i];
+        const VK_DESCRIPTOR_TYPE_COUNT *tc = &info->pTypeCount[i];
         struct intel_desc_offset size;
-        XGL_RESULT ret;
+        VK_RESULT ret;
 
         ret = desc_region_get_desc_size(region, tc->type, &size);
-        if (ret != XGL_SUCCESS)
+        if (ret != VK_SUCCESS)
             return ret;
 
         surface_size += size.surface * tc->count;
@@ -234,12 +234,12 @@
     intel_desc_offset_add(end, &region->cur, &alloc);
 
     if (!intel_desc_offset_within(end, &region->size))
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     /* increment the writer pointer */
     region->cur = *end;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void desc_region_validate_begin_end(const struct intel_desc_region *region,
@@ -262,18 +262,18 @@
     /* is it ok not to reclaim? */
 }
 
-XGL_RESULT intel_desc_region_begin_update(struct intel_desc_region *region,
-                                          XGL_DESCRIPTOR_UPDATE_MODE mode)
+VK_RESULT intel_desc_region_begin_update(struct intel_desc_region *region,
+                                          VK_DESCRIPTOR_UPDATE_MODE mode)
 {
     /* no-op */
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_desc_region_end_update(struct intel_desc_region *region,
+VK_RESULT intel_desc_region_end_update(struct intel_desc_region *region,
                                         struct intel_cmd *cmd)
 {
     /* No pipelined update.  cmd_draw() will do the work. */
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_region_clear(struct intel_desc_region *region,
@@ -348,7 +348,7 @@
 
 void intel_desc_region_read_surface(const struct intel_desc_region *region,
                                     const struct intel_desc_offset *offset,
-                                    XGL_PIPELINE_SHADER_STAGE stage,
+                                    VK_PIPELINE_SHADER_STAGE stage,
                                     const struct intel_mem **mem,
                                     bool *read_only,
                                     const uint32_t **cmd,
@@ -368,7 +368,7 @@
     *read_only = desc->read_only;
     switch (desc->type) {
     case INTEL_DESC_SURFACE_BUF:
-        *cmd = (stage == XGL_SHADER_STAGE_FRAGMENT) ?
+        *cmd = (stage == VK_SHADER_STAGE_FRAGMENT) ?
             desc->u.buf->fs_cmd : desc->u.buf->cmd;
         *cmd_len = desc->u.buf->cmd_len;
         break;
@@ -408,26 +408,26 @@
     intel_desc_pool_destroy(pool);
 }
 
-XGL_RESULT intel_desc_pool_create(struct intel_dev *dev,
-                                  XGL_DESCRIPTOR_POOL_USAGE usage,
+VK_RESULT intel_desc_pool_create(struct intel_dev *dev,
+                                  VK_DESCRIPTOR_POOL_USAGE usage,
                                   uint32_t max_sets,
-                                  const XGL_DESCRIPTOR_POOL_CREATE_INFO *info,
+                                  const VK_DESCRIPTOR_POOL_CREATE_INFO *info,
                                   struct intel_desc_pool **pool_ret)
 {
     struct intel_desc_pool *pool;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     pool = (struct intel_desc_pool *) intel_base_create(&dev->base.handle,
-            sizeof(*pool), dev->base.dbg, XGL_DBG_OBJECT_DESCRIPTOR_POOL,
+            sizeof(*pool), dev->base.dbg, VK_DBG_OBJECT_DESCRIPTOR_POOL,
             info, 0);
     if (!pool)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     pool->dev = dev;
 
     ret = intel_desc_region_alloc(dev->desc_region, info,
             &pool->region_begin, &pool->region_end);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_base_destroy(&pool->obj.base);
         return ret;
     }
@@ -439,7 +439,7 @@
 
     *pool_ret = pool;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_pool_destroy(struct intel_desc_pool *pool)
@@ -449,7 +449,7 @@
     intel_base_destroy(&pool->obj.base);
 }
 
-XGL_RESULT intel_desc_pool_alloc(struct intel_desc_pool *pool,
+VK_RESULT intel_desc_pool_alloc(struct intel_desc_pool *pool,
                                  const struct intel_desc_layout *layout,
                                  struct intel_desc_offset *begin,
                                  struct intel_desc_offset *end)
@@ -458,12 +458,12 @@
     intel_desc_offset_add(end, &pool->cur, &layout->region_size);
 
     if (!intel_desc_offset_within(end, &pool->region_end))
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     /* increment the writer pointer */
     pool->cur = *end;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_pool_reset(struct intel_desc_pool *pool)
@@ -479,25 +479,25 @@
     intel_desc_set_destroy(set);
 }
 
-XGL_RESULT intel_desc_set_create(struct intel_dev *dev,
+VK_RESULT intel_desc_set_create(struct intel_dev *dev,
                                  struct intel_desc_pool *pool,
-                                 XGL_DESCRIPTOR_SET_USAGE usage,
+                                 VK_DESCRIPTOR_SET_USAGE usage,
                                  const struct intel_desc_layout *layout,
                                  struct intel_desc_set **set_ret)
 {
     struct intel_desc_set *set;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     set = (struct intel_desc_set *) intel_base_create(&dev->base.handle,
-            sizeof(*set), dev->base.dbg, XGL_DBG_OBJECT_DESCRIPTOR_SET,
+            sizeof(*set), dev->base.dbg, VK_DBG_OBJECT_DESCRIPTOR_SET,
             NULL, 0);
     if (!set)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     set->region = dev->desc_region;
     ret = intel_desc_pool_alloc(pool, layout,
             &set->region_begin, &set->region_end);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_base_destroy(&set->obj.base);
         return ret;
     }
@@ -508,7 +508,7 @@
 
     *set_ret = set;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_set_destroy(struct intel_desc_set *set)
@@ -516,12 +516,12 @@
     intel_base_destroy(&set->obj.base);
 }
 
-static bool desc_set_img_layout_read_only(XGL_IMAGE_LAYOUT layout)
+static bool desc_set_img_layout_read_only(VK_IMAGE_LAYOUT layout)
 {
     switch (layout) {
-    case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
-    case XGL_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
-    case XGL_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
+    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+    case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
         return true;
     default:
         return false;
@@ -529,18 +529,18 @@
 }
 
 void intel_desc_set_update_samplers(struct intel_desc_set *set,
-                                    const XGL_UPDATE_SAMPLERS *update)
+                                    const VK_UPDATE_SAMPLERS *update)
 {
     struct intel_desc_iter iter;
     uint32_t i;
 
-    if (!desc_iter_init_for_update(&iter, set, XGL_DESCRIPTOR_TYPE_SAMPLER,
+    if (!desc_iter_init_for_update(&iter, set, VK_DESCRIPTOR_TYPE_SAMPLER,
                 update->binding, update->arrayIndex))
         return;
 
     for (i = 0; i < update->count; i++) {
         const struct intel_sampler *sampler =
-            intel_sampler((XGL_SAMPLER) update->pSamplers[i]);
+            intel_sampler((VK_SAMPLER) update->pSamplers[i]);
         struct intel_desc_sampler desc;
 
         desc.sampler = sampler;
@@ -553,13 +553,13 @@
 }
 
 void intel_desc_set_update_sampler_textures(struct intel_desc_set *set,
-                                            const XGL_UPDATE_SAMPLER_TEXTURES *update)
+                                            const VK_UPDATE_SAMPLER_TEXTURES *update)
 {
     struct intel_desc_iter iter;
     const struct intel_desc_layout_binding *binding;
     uint32_t i;
 
-    if (!desc_iter_init_for_update(&iter, set, XGL_DESCRIPTOR_TYPE_SAMPLER_TEXTURE,
+    if (!desc_iter_init_for_update(&iter, set, VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE,
                 update->binding, update->arrayIndex))
         return;
 
@@ -582,7 +582,7 @@
         const struct intel_sampler *sampler = (binding->immutable_samplers) ?
             binding->immutable_samplers[update->arrayIndex + i] :
             intel_sampler(update->pSamplerImageViews[i].sampler);
-        const XGL_IMAGE_VIEW_ATTACH_INFO *info =
+        const VK_IMAGE_VIEW_ATTACH_INFO *info =
             update->pSamplerImageViews[i].pImageView;
         const struct intel_img_view *view = intel_img_view(info->view);
         struct intel_desc_surface view_desc;
@@ -604,7 +604,7 @@
 }
 
 void intel_desc_set_update_images(struct intel_desc_set *set,
-                                  const XGL_UPDATE_IMAGES *update)
+                                  const VK_UPDATE_IMAGES *update)
 {
     struct intel_desc_iter iter;
     uint32_t i;
@@ -614,7 +614,7 @@
         return;
 
     for (i = 0; i < update->count; i++) {
-        const XGL_IMAGE_VIEW_ATTACH_INFO *info = &update->pImageViews[i];
+        const VK_IMAGE_VIEW_ATTACH_INFO *info = &update->pImageViews[i];
         const struct intel_img_view *view = intel_img_view(info->view);
         struct intel_desc_surface desc;
 
@@ -631,7 +631,7 @@
 }
 
 void intel_desc_set_update_buffers(struct intel_desc_set *set,
-                                   const XGL_UPDATE_BUFFERS *update)
+                                   const VK_UPDATE_BUFFERS *update)
 {
     struct intel_desc_iter iter;
     uint32_t i;
@@ -641,7 +641,7 @@
         return;
 
     for (i = 0; i < update->count; i++) {
-        const XGL_BUFFER_VIEW_ATTACH_INFO *info = &update->pBufferViews[i];
+        const VK_BUFFER_VIEW_ATTACH_INFO *info = &update->pBufferViews[i];
         const struct intel_buf_view *view = intel_buf_view(info->view);
         struct intel_desc_surface desc;
 
@@ -658,7 +658,7 @@
 }
 
 void intel_desc_set_update_as_copy(struct intel_desc_set *set,
-                                   const XGL_UPDATE_AS_COPY *update)
+                                   const VK_UPDATE_AS_COPY *update)
 {
     const struct intel_desc_set *src_set =
         intel_desc_set(update->descriptorSet);
@@ -667,7 +667,7 @@
     uint32_t i;
 
     /* disallow combined sampler textures */
-    if (update->descriptorType == XGL_DESCRIPTOR_TYPE_SAMPLER_TEXTURE)
+    if (update->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE)
         return;
 
     if (!desc_iter_init_for_update(&iter, set, update->descriptorType,
@@ -699,34 +699,34 @@
     intel_desc_layout_destroy(layout);
 }
 
-static XGL_RESULT desc_layout_init_bindings(struct intel_desc_layout *layout,
+static VK_RESULT desc_layout_init_bindings(struct intel_desc_layout *layout,
                                             const struct intel_desc_region *region,
-                                            const XGL_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info)
+                                            const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info)
 {
     struct intel_desc_offset offset;
     uint32_t i;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     intel_desc_offset_set(&offset, 0, 0);
 
     /* allocate bindings */
     layout->bindings = intel_alloc(layout, sizeof(layout->bindings[0]) *
-            info->count, 0, XGL_SYSTEM_ALLOC_INTERNAL);
+            info->count, 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!layout->bindings)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     memset(layout->bindings, 0, sizeof(layout->bindings[0]) * info->count);
     layout->binding_count = info->count;
 
     /* initialize bindings */
     for (i = 0; i < info->count; i++) {
-        const XGL_DESCRIPTOR_SET_LAYOUT_BINDING *lb = &info->pBinding[i];
+        const VK_DESCRIPTOR_SET_LAYOUT_BINDING *lb = &info->pBinding[i];
         struct intel_desc_layout_binding *binding = &layout->bindings[i];
         struct intel_desc_offset size;
 
         switch (lb->descriptorType) {
-        case XGL_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-        case XGL_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER_DYNAMIC:
+        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+        case VK_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER_DYNAMIC:
             layout->dynamic_desc_count += lb->count;
             break;
         default:
@@ -740,7 +740,7 @@
 
         ret = desc_region_get_desc_size(region,
                 lb->descriptorType, &size);
-        if (ret != XGL_SUCCESS)
+        if (ret != VK_SUCCESS)
             return ret;
 
         binding->increment = size;
@@ -759,20 +759,20 @@
 
             if (shared) {
                 binding->shared_immutable_sampler =
-                    intel_sampler((XGL_SAMPLER) lb->pImmutableSamplers[0]);
+                    intel_sampler((VK_SAMPLER) lb->pImmutableSamplers[0]);
                 /* set sampler offset increment to 0 */
                 intel_desc_offset_set(&binding->increment,
                         binding->increment.surface, 0);
             } else {
                 binding->immutable_samplers = intel_alloc(layout,
                         sizeof(binding->immutable_samplers[0]) * lb->count,
-                        0, XGL_SYSTEM_ALLOC_INTERNAL);
+                        0, VK_SYSTEM_ALLOC_INTERNAL);
                 if (!binding->immutable_samplers)
-                    return XGL_ERROR_OUT_OF_MEMORY;
+                    return VK_ERROR_OUT_OF_MEMORY;
 
                 for (j = 0; j < lb->count; j++) {
                     binding->immutable_samplers[j] =
-                        intel_sampler((XGL_SAMPLER) lb->pImmutableSamplers[j]);
+                        intel_sampler((VK_SAMPLER) lb->pImmutableSamplers[j]);
                 }
             }
         }
@@ -785,24 +785,24 @@
 
     layout->region_size = offset;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_desc_layout_create(struct intel_dev *dev,
-                                    const XGL_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info,
+VK_RESULT intel_desc_layout_create(struct intel_dev *dev,
+                                    const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info,
                                     struct intel_desc_layout **layout_ret)
 {
     struct intel_desc_layout *layout;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     layout = (struct intel_desc_layout *) intel_base_create(&dev->base.handle,
             sizeof(*layout), dev->base.dbg,
-            XGL_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT, info, 0);
+            VK_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT, info, 0);
     if (!layout)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     ret = desc_layout_init_bindings(layout, dev->desc_region, info);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_desc_layout_destroy(layout);
         return ret;
     }
@@ -811,7 +811,7 @@
 
     *layout_ret = layout;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_layout_destroy(struct intel_desc_layout *layout)
@@ -836,8 +836,8 @@
     intel_desc_layout_chain_destroy(chain);
 }
 
-XGL_RESULT intel_desc_layout_chain_create(struct intel_dev *dev,
-                                          const XGL_DESCRIPTOR_SET_LAYOUT *layouts,
+VK_RESULT intel_desc_layout_chain_create(struct intel_dev *dev,
+                                          const VK_DESCRIPTOR_SET_LAYOUT *layouts,
                                           uint32_t count,
                                           struct intel_desc_layout_chain **chain_ret)
 {
@@ -846,23 +846,23 @@
 
     chain = (struct intel_desc_layout_chain *)
         intel_base_create(&dev->base.handle, sizeof(*chain), dev->base.dbg,
-                XGL_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT_CHAIN, NULL, 0);
+                VK_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT_CHAIN, NULL, 0);
     if (!chain)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     chain->layouts = intel_alloc(chain, sizeof(chain->layouts[0]) * count,
-            0, XGL_SYSTEM_ALLOC_INTERNAL);
+            0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!chain->layouts) {
         intel_desc_layout_chain_destroy(chain);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     chain->dynamic_desc_indices = intel_alloc(chain,
             sizeof(chain->dynamic_desc_indices[0]) * count,
-            0, XGL_SYSTEM_ALLOC_INTERNAL);
+            0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!chain->dynamic_desc_indices) {
         intel_desc_layout_chain_destroy(chain);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     for (i = 0; i < count; i++) {
@@ -879,7 +879,7 @@
 
     *chain_ret = chain;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_desc_layout_chain_destroy(struct intel_desc_layout_chain *chain)
@@ -891,10 +891,10 @@
     intel_base_destroy(&chain->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDescriptorSetLayout(
-    XGL_DEVICE                                   device,
-    const XGL_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo,
-    XGL_DESCRIPTOR_SET_LAYOUT*                   pSetLayout)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(
+    VK_DEVICE                                   device,
+    const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo,
+    VK_DESCRIPTOR_SET_LAYOUT*                   pSetLayout)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -902,11 +902,11 @@
             (struct intel_desc_layout **) pSetLayout);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDescriptorSetLayoutChain(
-    XGL_DEVICE                                   device,
+ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayoutChain(
+    VK_DEVICE                                   device,
     uint32_t                                     setLayoutArrayCount,
-    const XGL_DESCRIPTOR_SET_LAYOUT*             pSetLayoutArray,
-    XGL_DESCRIPTOR_SET_LAYOUT_CHAIN*             pLayoutChain)
+    const VK_DESCRIPTOR_SET_LAYOUT*             pSetLayoutArray,
+    VK_DESCRIPTOR_SET_LAYOUT_CHAIN*             pLayoutChain)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -915,9 +915,9 @@
             (struct intel_desc_layout_chain **) pLayoutChain);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglBeginDescriptorPoolUpdate(
-    XGL_DEVICE                                   device,
-    XGL_DESCRIPTOR_UPDATE_MODE                   updateMode)
+ICD_EXPORT VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(
+    VK_DEVICE                                   device,
+    VK_DESCRIPTOR_UPDATE_MODE                   updateMode)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_desc_region *region = dev->desc_region;
@@ -925,9 +925,9 @@
     return intel_desc_region_begin_update(region, updateMode);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglEndDescriptorPoolUpdate(
-    XGL_DEVICE                                   device,
-    XGL_CMD_BUFFER                               cmd_)
+ICD_EXPORT VK_RESULT VKAPI vkEndDescriptorPoolUpdate(
+    VK_DEVICE                                   device,
+    VK_CMD_BUFFER                               cmd_)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_desc_region *region = dev->desc_region;
@@ -936,12 +936,12 @@
     return intel_desc_region_end_update(region, cmd);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDescriptorPool(
-    XGL_DEVICE                                   device,
-    XGL_DESCRIPTOR_POOL_USAGE                    poolUsage,
+ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(
+    VK_DEVICE                                   device,
+    VK_DESCRIPTOR_POOL_USAGE                    poolUsage,
     uint32_t                                     maxSets,
-    const XGL_DESCRIPTOR_POOL_CREATE_INFO*       pCreateInfo,
-    XGL_DESCRIPTOR_POOL*                         pDescriptorPool)
+    const VK_DESCRIPTOR_POOL_CREATE_INFO*       pCreateInfo,
+    VK_DESCRIPTOR_POOL*                         pDescriptorPool)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -949,36 +949,36 @@
             (struct intel_desc_pool **) pDescriptorPool);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglResetDescriptorPool(
-    XGL_DESCRIPTOR_POOL                          descriptorPool)
+ICD_EXPORT VK_RESULT VKAPI vkResetDescriptorPool(
+    VK_DESCRIPTOR_POOL                          descriptorPool)
 {
     struct intel_desc_pool *pool = intel_desc_pool(descriptorPool);
 
     intel_desc_pool_reset(pool);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglAllocDescriptorSets(
-    XGL_DESCRIPTOR_POOL                          descriptorPool,
-    XGL_DESCRIPTOR_SET_USAGE                     setUsage,
+ICD_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(
+    VK_DESCRIPTOR_POOL                          descriptorPool,
+    VK_DESCRIPTOR_SET_USAGE                     setUsage,
     uint32_t                                     count,
-    const XGL_DESCRIPTOR_SET_LAYOUT*             pSetLayouts,
-    XGL_DESCRIPTOR_SET*                          pDescriptorSets,
+    const VK_DESCRIPTOR_SET_LAYOUT*             pSetLayouts,
+    VK_DESCRIPTOR_SET*                          pDescriptorSets,
     uint32_t*                                    pCount)
 {
     struct intel_desc_pool *pool = intel_desc_pool(descriptorPool);
     struct intel_dev *dev = pool->dev;
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
     uint32_t i;
 
     for (i = 0; i < count; i++) {
         const struct intel_desc_layout *layout =
-            intel_desc_layout((XGL_DESCRIPTOR_SET_LAYOUT) pSetLayouts[i]);
+            intel_desc_layout((VK_DESCRIPTOR_SET_LAYOUT) pSetLayouts[i]);
 
         ret = intel_desc_set_create(dev, pool, setUsage, layout,
                 (struct intel_desc_set **) &pDescriptorSets[i]);
-        if (ret != XGL_SUCCESS)
+        if (ret != VK_SUCCESS)
             break;
     }
 
@@ -988,23 +988,23 @@
     return ret;
 }
 
-ICD_EXPORT void XGLAPI xglClearDescriptorSets(
-    XGL_DESCRIPTOR_POOL                          descriptorPool,
+ICD_EXPORT void VKAPI vkClearDescriptorSets(
+    VK_DESCRIPTOR_POOL                          descriptorPool,
     uint32_t                                     count,
-    const XGL_DESCRIPTOR_SET*                    pDescriptorSets)
+    const VK_DESCRIPTOR_SET*                    pDescriptorSets)
 {
     uint32_t i;
 
     for (i = 0; i < count; i++) {
         struct intel_desc_set *set =
-            intel_desc_set((XGL_DESCRIPTOR_SET) pDescriptorSets[i]);
+            intel_desc_set((VK_DESCRIPTOR_SET) pDescriptorSets[i]);
 
         intel_desc_region_clear(set->region, &set->region_begin, &set->region_end);
     }
 }
 
-ICD_EXPORT void XGLAPI xglUpdateDescriptors(
-    XGL_DESCRIPTOR_SET                           descriptorSet,
+ICD_EXPORT void VKAPI vkUpdateDescriptors(
+    VK_DESCRIPTOR_SET                           descriptorSet,
     uint32_t                                     updateCount,
     const void**                                 ppUpdateArray)
 {
@@ -1014,31 +1014,31 @@
     for (i = 0; i < updateCount; i++) {
         const union {
             struct {
-                XGL_STRUCTURE_TYPE                      sType;
+                VK_STRUCTURE_TYPE                      sType;
                 const void*                             pNext;
             } common;
 
-            XGL_UPDATE_SAMPLERS samplers;
-            XGL_UPDATE_SAMPLER_TEXTURES sampler_textures;
-            XGL_UPDATE_IMAGES images;
-            XGL_UPDATE_BUFFERS buffers;
-            XGL_UPDATE_AS_COPY as_copy;
+            VK_UPDATE_SAMPLERS samplers;
+            VK_UPDATE_SAMPLER_TEXTURES sampler_textures;
+            VK_UPDATE_IMAGES images;
+            VK_UPDATE_BUFFERS buffers;
+            VK_UPDATE_AS_COPY as_copy;
         } *u = ppUpdateArray[i];
 
         switch (u->common.sType) {
-        case XGL_STRUCTURE_TYPE_UPDATE_SAMPLERS:
+        case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
             intel_desc_set_update_samplers(set, &u->samplers);
             break;
-        case XGL_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
+        case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
             intel_desc_set_update_sampler_textures(set, &u->sampler_textures);
             break;
-        case XGL_STRUCTURE_TYPE_UPDATE_IMAGES:
+        case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
             intel_desc_set_update_images(set, &u->images);
             break;
-        case XGL_STRUCTURE_TYPE_UPDATE_BUFFERS:
+        case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
             intel_desc_set_update_buffers(set, &u->buffers);
             break;
-        case XGL_STRUCTURE_TYPE_UPDATE_AS_COPY:
+        case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
             intel_desc_set_update_as_copy(set, &u->as_copy);
             break;
         default:
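
The vkUpdateDescriptors() hunk above relies on every update struct beginning with the same sType/pNext header, so one pointer can be dispatched through a union. A minimal sketch of that tagged-header pattern, with invented struct and enum names:

#include <stdint.h>
#include <stdio.h>

/* Invented sType values and update structs; the real ones are the
 * VK_STRUCTURE_TYPE_UPDATE_* family used in the switch above. */
enum update_type { UPDATE_SAMPLERS = 1, UPDATE_BUFFERS = 2 };

struct update_header {
    enum update_type sType;
    const void      *pNext;
};

struct update_samplers {
    struct update_header hdr;
    uint32_t count;
};

struct update_buffers {
    struct update_header hdr;
    uint32_t count;
};

/* Every update struct starts with the same header, so a single
 * pointer can be dispatched on its leading sType field. */
static void process_update(const void *update)
{
    const struct update_header *hdr = update;

    switch (hdr->sType) {
    case UPDATE_SAMPLERS:
        printf("samplers: %u\n",
               ((const struct update_samplers *) update)->count);
        break;
    case UPDATE_BUFFERS:
        printf("buffers: %u\n",
               ((const struct update_buffers *) update)->count);
        break;
    default:
        break; /* ignore unknown update types in this sketch */
    }
}

The pNext pointer rides along in the common header, which is what lets callers chain further structures without changing the dispatch.
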
diff --git a/icd/intel/desc.h b/icd/intel/desc.h
index a7762c7..11cf6c5 100644
--- a/icd/intel/desc.h
+++ b/icd/intel/desc.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2015 LunarG, Inc.
  *
@@ -49,7 +49,7 @@
 };
 
 struct intel_desc_iter {
-    XGL_DESCRIPTOR_TYPE type;
+    VK_DESCRIPTOR_TYPE type;
     struct intel_desc_offset increment;
     uint32_t size;
 
@@ -106,7 +106,7 @@
 
     /* homogeneous bindings in this layout */
     struct intel_desc_layout_binding {
-        XGL_DESCRIPTOR_TYPE type;
+        VK_DESCRIPTOR_TYPE type;
         uint32_t array_size;
         const struct intel_sampler **immutable_samplers;
         const struct intel_sampler *shared_immutable_sampler;
@@ -134,7 +134,7 @@
     uint32_t total_dynamic_desc_count;
 };
 
-static inline struct intel_desc_pool *intel_desc_pool(XGL_DESCRIPTOR_POOL pool)
+static inline struct intel_desc_pool *intel_desc_pool(VK_DESCRIPTOR_POOL pool)
 {
     return (struct intel_desc_pool *) pool;
 }
@@ -144,7 +144,7 @@
     return (struct intel_desc_pool *) obj;
 }
 
-static inline struct intel_desc_set *intel_desc_set(XGL_DESCRIPTOR_SET set)
+static inline struct intel_desc_set *intel_desc_set(VK_DESCRIPTOR_SET set)
 {
     return (struct intel_desc_set *) set;
 }
@@ -154,7 +154,7 @@
     return (struct intel_desc_set *) obj;
 }
 
-static inline struct intel_desc_layout *intel_desc_layout(XGL_DESCRIPTOR_SET_LAYOUT layout)
+static inline struct intel_desc_layout *intel_desc_layout(VK_DESCRIPTOR_SET_LAYOUT layout)
 {
     return (struct intel_desc_layout *) layout;
 }
@@ -164,7 +164,7 @@
     return (struct intel_desc_layout *) obj;
 }
 
-static inline struct intel_desc_layout_chain *intel_desc_layout_chain(XGL_DESCRIPTOR_SET_LAYOUT_CHAIN chain)
+static inline struct intel_desc_layout_chain *intel_desc_layout_chain(VK_DESCRIPTOR_SET_LAYOUT_CHAIN chain)
 {
     return (struct intel_desc_layout_chain *) chain;
 }
@@ -220,22 +220,22 @@
 
 bool intel_desc_iter_advance(struct intel_desc_iter *iter);
 
-XGL_RESULT intel_desc_region_create(struct intel_dev *dev,
+VK_RESULT intel_desc_region_create(struct intel_dev *dev,
                                     struct intel_desc_region **region_ret);
 void intel_desc_region_destroy(struct intel_dev *dev,
                                struct intel_desc_region *region);
 
-XGL_RESULT intel_desc_region_alloc(struct intel_desc_region *region,
-                                   const XGL_DESCRIPTOR_POOL_CREATE_INFO *info,
+VK_RESULT intel_desc_region_alloc(struct intel_desc_region *region,
+                                   const VK_DESCRIPTOR_POOL_CREATE_INFO *info,
                                    struct intel_desc_offset *begin,
                                    struct intel_desc_offset *end);
 void intel_desc_region_free(struct intel_desc_region *region,
                             const struct intel_desc_offset *begin,
                             const struct intel_desc_offset *end);
 
-XGL_RESULT intel_desc_region_begin_update(struct intel_desc_region *region,
-                                          XGL_DESCRIPTOR_UPDATE_MODE mode);
-XGL_RESULT intel_desc_region_end_update(struct intel_desc_region *region,
+VK_RESULT intel_desc_region_begin_update(struct intel_desc_region *region,
+                                          VK_DESCRIPTOR_UPDATE_MODE mode);
+VK_RESULT intel_desc_region_end_update(struct intel_desc_region *region,
                                         struct intel_cmd *cmd);
 
 void intel_desc_region_clear(struct intel_desc_region *region,
@@ -255,7 +255,7 @@
 
 void intel_desc_region_read_surface(const struct intel_desc_region *region,
                                     const struct intel_desc_offset *offset,
-                                    XGL_PIPELINE_SHADER_STAGE stage,
+                                    VK_PIPELINE_SHADER_STAGE stage,
                                     const struct intel_mem **mem,
                                     bool *read_only,
                                     const uint32_t **cmd,
@@ -264,44 +264,44 @@
                                     const struct intel_desc_offset *offset,
                                     const struct intel_sampler **sampler);
 
-XGL_RESULT intel_desc_pool_create(struct intel_dev *dev,
-                                  XGL_DESCRIPTOR_POOL_USAGE usage,
+VK_RESULT intel_desc_pool_create(struct intel_dev *dev,
+                                  VK_DESCRIPTOR_POOL_USAGE usage,
                                   uint32_t max_sets,
-                                  const XGL_DESCRIPTOR_POOL_CREATE_INFO *info,
+                                  const VK_DESCRIPTOR_POOL_CREATE_INFO *info,
                                   struct intel_desc_pool **pool_ret);
 void intel_desc_pool_destroy(struct intel_desc_pool *pool);
 
-XGL_RESULT intel_desc_pool_alloc(struct intel_desc_pool *pool,
+VK_RESULT intel_desc_pool_alloc(struct intel_desc_pool *pool,
                                  const struct intel_desc_layout *layout,
                                  struct intel_desc_offset *begin,
                                  struct intel_desc_offset *end);
 void intel_desc_pool_reset(struct intel_desc_pool *pool);
 
-XGL_RESULT intel_desc_set_create(struct intel_dev *dev,
+VK_RESULT intel_desc_set_create(struct intel_dev *dev,
                                  struct intel_desc_pool *pool,
-                                 XGL_DESCRIPTOR_SET_USAGE usage,
+                                 VK_DESCRIPTOR_SET_USAGE usage,
                                  const struct intel_desc_layout *layout,
                                  struct intel_desc_set **set_ret);
 void intel_desc_set_destroy(struct intel_desc_set *set);
 
 void intel_desc_set_update_samplers(struct intel_desc_set *set,
-                                    const XGL_UPDATE_SAMPLERS *update);
+                                    const VK_UPDATE_SAMPLERS *update);
 void intel_desc_set_update_sampler_textures(struct intel_desc_set *set,
-                                            const XGL_UPDATE_SAMPLER_TEXTURES *update);
+                                            const VK_UPDATE_SAMPLER_TEXTURES *update);
 void intel_desc_set_update_images(struct intel_desc_set *set,
-                                  const XGL_UPDATE_IMAGES *update);
+                                  const VK_UPDATE_IMAGES *update);
 void intel_desc_set_update_buffers(struct intel_desc_set *set,
-                                   const XGL_UPDATE_BUFFERS *update);
+                                   const VK_UPDATE_BUFFERS *update);
 void intel_desc_set_update_as_copy(struct intel_desc_set *set,
-                                   const XGL_UPDATE_AS_COPY *update);
+                                   const VK_UPDATE_AS_COPY *update);
 
-XGL_RESULT intel_desc_layout_create(struct intel_dev *dev,
-                                    const XGL_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info,
+VK_RESULT intel_desc_layout_create(struct intel_dev *dev,
+                                    const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info,
                                     struct intel_desc_layout **layout_ret);
 void intel_desc_layout_destroy(struct intel_desc_layout *layout);
 
-XGL_RESULT intel_desc_layout_chain_create(struct intel_dev *dev,
-                                          const XGL_DESCRIPTOR_SET_LAYOUT *layouts,
+VK_RESULT intel_desc_layout_chain_create(struct intel_dev *dev,
+                                          const VK_DESCRIPTOR_SET_LAYOUT *layouts,
                                           uint32_t count,
                                           struct intel_desc_layout_chain **chain_ret);
 void intel_desc_layout_chain_destroy(struct intel_desc_layout_chain *chain);
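
One detail worth calling out in the desc.c/desc.h changes above: descriptor space is tracked as a pair of offsets (surface heap, sampler heap) that advance in lockstep, and both intel_desc_region_alloc() and intel_desc_pool_alloc() are simple bump allocators over that pair. A self-contained sketch of the idea, under invented names:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative mirror of the two-component offsets used above:
 * a descriptor consumes space in a surface heap and a sampler heap
 * at the same time, so an "offset" is really a pair. */
struct desc_offset {
    uint32_t surface;
    uint32_t sampler;
};

static void desc_offset_add(struct desc_offset *dst,
                            const struct desc_offset *a,
                            const struct desc_offset *b)
{
    dst->surface = a->surface + b->surface;
    dst->sampler = a->sampler + b->sampler;
}

static bool desc_offset_within(const struct desc_offset *off,
                               const struct desc_offset *size)
{
    return off->surface <= size->surface && off->sampler <= size->sampler;
}

/* Bump-pointer allocation in the style of intel_desc_region_alloc():
 * reserve `want` past `cur`, fail if either component would overflow. */
static bool desc_bump_alloc(struct desc_offset *cur,
                            const struct desc_offset *size,
                            const struct desc_offset *want,
                            struct desc_offset *begin,
                            struct desc_offset *end)
{
    *begin = *cur;
    desc_offset_add(end, cur, want);
    if (!desc_offset_within(end, size))
        return false;           /* maps to VK_ERROR_OUT_OF_MEMORY */
    *cur = *end;                /* advance the writer pointer */
    return true;
}

This matches the "is it ok not to reclaim?" comment in desc.c above: a bump allocator never frees individual ranges, it only resets wholesale.
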
diff --git a/icd/intel/dev.c b/icd/intel/dev.c
index 8db165b..94bcd42 100644
--- a/icd/intel/dev.c
+++ b/icd/intel/dev.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -65,18 +65,18 @@
     return true;
 }
 
-static XGL_RESULT dev_create_queues(struct intel_dev *dev,
-                                    const XGL_DEVICE_QUEUE_CREATE_INFO *queues,
+static VK_RESULT dev_create_queues(struct intel_dev *dev,
+                                    const VK_DEVICE_QUEUE_CREATE_INFO *queues,
                                     uint32_t count)
 {
     uint32_t i;
 
     if (!count)
-        return XGL_ERROR_INVALID_POINTER;
+        return VK_ERROR_INVALID_POINTER;
 
     for (i = 0; i < count; i++) {
-        const XGL_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
-        XGL_RESULT ret = XGL_SUCCESS;
+        const VK_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
+        VK_RESULT ret = VK_SUCCESS;
 
         if (q->queueNodeIndex < INTEL_GPU_ENGINE_COUNT &&
             q->queueCount == 1 && !dev->queues[q->queueNodeIndex]) {
@@ -84,10 +84,10 @@
                     &dev->queues[q->queueNodeIndex]);
         }
         else {
-            ret = XGL_ERROR_INVALID_POINTER;
+            ret = VK_ERROR_INVALID_POINTER;
         }
 
-        if (ret != XGL_SUCCESS) {
+        if (ret != VK_SUCCESS) {
             uint32_t j;
             for (j = 0; j < i; j++)
                 intel_queue_destroy(dev->queues[j]);
@@ -96,25 +96,25 @@
         }
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
-                            const XGL_DEVICE_CREATE_INFO *info,
+VK_RESULT intel_dev_create(struct intel_gpu *gpu,
+                            const VK_DEVICE_CREATE_INFO *info,
                             struct intel_dev **dev_ret)
 {
     struct intel_dev *dev;
     uint32_t i;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     if (gpu->winsys)
-        return XGL_ERROR_DEVICE_ALREADY_CREATED;
+        return VK_ERROR_DEVICE_ALREADY_CREATED;
 
     dev = (struct intel_dev *) intel_base_create(&gpu->handle,
             sizeof(*dev), info->flags,
-            XGL_DBG_OBJECT_DEVICE, info, sizeof(struct intel_dev_dbg));
+            VK_DBG_OBJECT_DEVICE, info, sizeof(struct intel_dev_dbg));
     if (!dev)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     for (i = 0; i < info->extensionCount; i++) {
         const enum intel_ext_type ext = intel_gpu_lookup_extension(gpu,
@@ -127,7 +127,7 @@
     dev->gpu = gpu;
 
     ret = intel_gpu_init_winsys(gpu);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_dev_destroy(dev);
         return ret;
     }
@@ -138,16 +138,16 @@
             "command buffer scratch", 4096, false);
     if (!dev->cmd_scratch_bo) {
         intel_dev_destroy(dev);
-        return XGL_ERROR_OUT_OF_GPU_MEMORY;
+        return VK_ERROR_OUT_OF_GPU_MEMORY;
     }
 
     if (!dev_create_meta_shaders(dev)) {
         intel_dev_destroy(dev);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     ret = intel_desc_region_create(dev, &dev->desc_region);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_dev_destroy(dev);
         return ret;
     }
@@ -161,14 +161,14 @@
 
     ret = dev_create_queues(dev, info->pRequestedQueues,
             info->queueRecordCount);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_dev_destroy(dev);
         return ret;
     }
 
     *dev_ret = dev;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void dev_clear_msg_filters(struct intel_dev *dev)
@@ -212,14 +212,14 @@
         intel_gpu_cleanup_winsys(gpu);
 }
 
-XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
+VK_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                     int32_t msg_code,
-                                    XGL_DBG_MSG_FILTER filter)
+                                    VK_DBG_MSG_FILTER filter)
 {
     struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
     struct intel_dev_dbg_msg_filter *f = dbg->filters;
 
-    assert(filter != XGL_DBG_MSG_FILTER_NONE);
+    assert(filter != VK_DBG_MSG_FILTER_NONE);
 
     while (f) {
         if (f->msg_code == msg_code)
@@ -233,9 +233,9 @@
             f->triggered = false;
         }
     } else {
-        f = intel_alloc(dev, sizeof(*f), 0, XGL_SYSTEM_ALLOC_DEBUG);
+        f = intel_alloc(dev, sizeof(*f), 0, VK_SYSTEM_ALLOC_DEBUG);
         if (!f)
-            return XGL_ERROR_OUT_OF_MEMORY;
+            return VK_ERROR_OUT_OF_MEMORY;
 
         f->msg_code = msg_code;
         f->filter = filter;
@@ -245,7 +245,7 @@
         dbg->filters = f;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_dev_remove_msg_filter(struct intel_dev *dev,
@@ -286,10 +286,10 @@
             continue;
         }
 
-        if (filter->filter == XGL_DBG_MSG_FILTER_ALL)
+        if (filter->filter == VK_DBG_MSG_FILTER_ALL)
             return true;
 
-        if (filter->filter == XGL_DBG_MSG_FILTER_REPEATED &&
+        if (filter->filter == VK_DBG_MSG_FILTER_REPEATED &&
             filter->triggered)
             return true;
 
@@ -301,8 +301,8 @@
 }
 
 void intel_dev_log(struct intel_dev *dev,
-                   XGL_DBG_MSG_TYPE msg_type,
-                   XGL_VALIDATION_LEVEL validation_level,
+                   VK_DBG_MSG_TYPE msg_type,
+                   VK_VALIDATION_LEVEL validation_level,
                    struct intel_base *src_object,
                    size_t location,
                    int32_t msg_code,
@@ -314,61 +314,61 @@
         return;
 
     va_start(ap, format);
-    intel_logv(dev, msg_type, validation_level, (XGL_BASE_OBJECT) src_object,
+    intel_logv(dev, msg_type, validation_level, (VK_BASE_OBJECT) src_object,
             location, msg_code, format, ap);
     va_end(ap);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDevice(
-    XGL_PHYSICAL_GPU                            gpu_,
-    const XGL_DEVICE_CREATE_INFO*               pCreateInfo,
-    XGL_DEVICE*                                 pDevice)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDevice(
+    VK_PHYSICAL_GPU                            gpu_,
+    const VK_DEVICE_CREATE_INFO*               pCreateInfo,
+    VK_DEVICE*                                 pDevice)
 {
     struct intel_gpu *gpu = intel_gpu(gpu_);
 
     return intel_dev_create(gpu, pCreateInfo, (struct intel_dev **) pDevice);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDestroyDevice(
-    XGL_DEVICE                                  device)
+ICD_EXPORT VK_RESULT VKAPI vkDestroyDevice(
+    VK_DEVICE                                  device)
 {
     struct intel_dev *dev = intel_dev(device);
 
     intel_dev_destroy(dev);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetDeviceQueue(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(
+    VK_DEVICE                                  device,
     uint32_t                                    queueNodeIndex,
     uint32_t                                    queueIndex,
-    XGL_QUEUE*                                  pQueue)
+    VK_QUEUE*                                  pQueue)
 {
     struct intel_dev *dev = intel_dev(device);
 
     if (queueNodeIndex >= INTEL_GPU_ENGINE_COUNT) {
-        return XGL_ERROR_UNAVAILABLE;
+        return VK_ERROR_UNAVAILABLE;
     }
 
     if (queueIndex > 0)
-        return XGL_ERROR_UNAVAILABLE;
+        return VK_ERROR_UNAVAILABLE;
 
     *pQueue = dev->queues[queueNodeIndex];
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDeviceWaitIdle(
-    XGL_DEVICE                                  device)
+ICD_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(
+    VK_DEVICE                                  device)
 {
     struct intel_dev *dev = intel_dev(device);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
     uint32_t i;
 
     for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
         if (dev->queues[i]) {
-            const XGL_RESULT r = intel_queue_wait(dev->queues[i], -1);
-            if (r != XGL_SUCCESS)
+            const VK_RESULT r = intel_queue_wait(dev->queues[i], -1);
+            if (r != VK_SUCCESS)
                 ret = r;
         }
     }
@@ -376,9 +376,9 @@
     return ret;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetValidationLevel(
-    XGL_DEVICE                                  device,
-    XGL_VALIDATION_LEVEL                        validationLevel)
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetValidationLevel(
+    VK_DEVICE                                  device,
+    VK_VALIDATION_LEVEL                        validationLevel)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
@@ -386,55 +386,55 @@
     if (dbg)
         dbg->validation_level = validationLevel;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetMessageFilter(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetMessageFilter(
+    VK_DEVICE                                  device,
     int32_t                                     msgCode,
-    XGL_DBG_MSG_FILTER                          filter)
+    VK_DBG_MSG_FILTER                          filter)
 {
     struct intel_dev *dev = intel_dev(device);
 
     if (!dev->base.dbg)
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 
-    if (filter == XGL_DBG_MSG_FILTER_NONE) {
+    if (filter == VK_DBG_MSG_FILTER_NONE) {
         intel_dev_remove_msg_filter(dev, msgCode);
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
     }
 
     return intel_dev_add_msg_filter(dev, msgCode, filter);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetDeviceOption(
-    XGL_DEVICE                                  device,
-    XGL_DBG_DEVICE_OPTION                       dbgOption,
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetDeviceOption(
+    VK_DEVICE                                  device,
+    VK_DBG_DEVICE_OPTION                       dbgOption,
     size_t                                      dataSize,
     const void*                                 pData)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     if (dataSize == 0)
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
 
     switch (dbgOption) {
-    case XGL_DBG_OPTION_DISABLE_PIPELINE_LOADS:
+    case VK_DBG_OPTION_DISABLE_PIPELINE_LOADS:
         if (dbg)
             dbg->disable_pipeline_loads = *((const bool *) pData);
         break;
-    case XGL_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS:
+    case VK_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS:
         if (dbg)
             dbg->force_object_memory_reqs = *((const bool *) pData);
         break;
-    case XGL_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT:
+    case VK_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT:
         if (dbg)
             dbg->force_large_image_alignment = *((const bool *) pData);
         break;
     default:
-        ret = XGL_ERROR_INVALID_VALUE;
+        ret = VK_ERROR_INVALID_VALUE;
         break;
     }
 
diff --git a/icd/intel/dev.h b/icd/intel/dev.h
index 2df7c05..7cb1ee3 100644
--- a/icd/intel/dev.h
+++ b/icd/intel/dev.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -137,7 +137,7 @@
 
 struct intel_dev_dbg_msg_filter {
     int32_t msg_code;
-    XGL_DBG_MSG_FILTER filter;
+    VK_DBG_MSG_FILTER filter;
     bool triggered;
 
     struct intel_dev_dbg_msg_filter *next;
@@ -146,7 +146,7 @@
 struct intel_dev_dbg {
     struct intel_base_dbg base;
 
-    XGL_VALIDATION_LEVEL validation_level;
+    VK_VALIDATION_LEVEL validation_level;
     bool disable_pipeline_loads;
     bool force_object_memory_reqs;
     bool force_large_image_alignment;
@@ -176,7 +176,7 @@
     struct intel_queue *queues[INTEL_GPU_ENGINE_COUNT];
 };
 
-static inline struct intel_dev *intel_dev(XGL_DEVICE dev)
+static inline struct intel_dev *intel_dev(VK_DEVICE dev)
 {
     return (struct intel_dev *) dev;
 }
@@ -186,21 +186,21 @@
     return (struct intel_dev_dbg *) dev->base.dbg;
 }
 
-XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
-                            const XGL_DEVICE_CREATE_INFO *info,
+VK_RESULT intel_dev_create(struct intel_gpu *gpu,
+                            const VK_DEVICE_CREATE_INFO *info,
                             struct intel_dev **dev_ret);
 void intel_dev_destroy(struct intel_dev *dev);
 
-XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
+VK_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                     int32_t msg_code,
-                                    XGL_DBG_MSG_FILTER filter);
+                                    VK_DBG_MSG_FILTER filter);
 
 void intel_dev_remove_msg_filter(struct intel_dev *dev,
                                  int32_t msg_code);
 
 void intel_dev_log(struct intel_dev *dev,
-                   XGL_DBG_MSG_TYPE msg_type,
-                   XGL_VALIDATION_LEVEL validation_level,
+                   VK_DBG_MSG_TYPE msg_type,
+                   VK_VALIDATION_LEVEL validation_level,
                    struct intel_base *src_object,
                    size_t location,
                    int32_t msg_code,
diff --git a/icd/intel/event.c b/icd/intel/event.c
index fb65732..1d39b11 100644
--- a/icd/intel/event.c
+++ b/icd/intel/event.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -29,12 +29,12 @@
 #include "mem.h"
 #include "event.h"
 
-static XGL_RESULT event_map(struct intel_event *event, uint32_t **ptr_ret)
+static VK_RESULT event_map(struct intel_event *event, uint32_t **ptr_ret)
 {
     void *ptr;
 
     if (!event->obj.mem)
-        return XGL_ERROR_MEMORY_NOT_BOUND;
+        return VK_ERROR_MEMORY_NOT_BOUND;
 
     /*
      * This is an unsynchronized mapping.  It doesn't look like we want a
@@ -43,11 +43,11 @@
      */
     ptr = intel_mem_map(event->obj.mem, 0);
     if (!ptr)
-        return XGL_ERROR_MEMORY_MAP_FAILED;
+        return VK_ERROR_MEMORY_MAP_FAILED;
 
     *ptr_ret = (uint32_t *) ((uint8_t *) ptr + event->obj.offset);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void event_unmap(struct intel_event *event)
@@ -55,13 +55,13 @@
     intel_mem_unmap(event->obj.mem);
 }
 
-static XGL_RESULT event_write(struct intel_event *event, uint32_t val)
+static VK_RESULT event_write(struct intel_event *event, uint32_t val)
 {
-    XGL_RESULT ret;
+    VK_RESULT ret;
     uint32_t *ptr;
 
     ret = event_map(event, &ptr);
-    if (ret == XGL_SUCCESS) {
+    if (ret == VK_SUCCESS) {
         *ptr = val;
         event_unmap(event);
     }
@@ -69,13 +69,13 @@
     return ret;
 }
 
-static XGL_RESULT event_read(struct intel_event *event, uint32_t *val)
+static VK_RESULT event_read(struct intel_event *event, uint32_t *val)
 {
-    XGL_RESULT ret;
+    VK_RESULT ret;
     uint32_t *ptr;
 
     ret = event_map(event, &ptr);
-    if (ret == XGL_SUCCESS) {
+    if (ret == VK_SUCCESS) {
         *val = *ptr;
         event_unmap(event);
     }
@@ -90,23 +90,23 @@
     intel_event_destroy(event);
 }
 
-static XGL_RESULT event_get_info(struct intel_base *base, int type,
+static VK_RESULT event_get_info(struct intel_base *base, int type,
                                  size_t *size, void *data)
 {
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (type) {
-    case XGL_INFO_TYPE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
         {
-            XGL_MEMORY_REQUIREMENTS *mem_req = data;
+            VK_MEMORY_REQUIREMENTS *mem_req = data;
 
-            *size = sizeof(XGL_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
             /* use a dword aligned to a 64-byte boundary */
             mem_req->size = 4;
             mem_req->alignment = 64;
-            mem_req->memType = XGL_MEMORY_TYPE_OTHER;
+            mem_req->memType = VK_MEMORY_TYPE_OTHER;
         }
         break;
     default:
@@ -117,23 +117,23 @@
     return ret;
 }
 
-XGL_RESULT intel_event_create(struct intel_dev *dev,
-                              const XGL_EVENT_CREATE_INFO *info,
+VK_RESULT intel_event_create(struct intel_dev *dev,
+                              const VK_EVENT_CREATE_INFO *info,
                               struct intel_event **event_ret)
 {
     struct intel_event *event;
 
     event = (struct intel_event *) intel_base_create(&dev->base.handle,
-            sizeof(*event), dev->base.dbg, XGL_DBG_OBJECT_EVENT, info, 0);
+            sizeof(*event), dev->base.dbg, VK_DBG_OBJECT_EVENT, info, 0);
     if (!event)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     event->obj.base.get_info = event_get_info;
     event->obj.destroy = event_destroy;
 
     *event_ret = event;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_event_destroy(struct intel_event *event)
@@ -141,32 +141,32 @@
     intel_base_destroy(&event->obj.base);
 }
 
-XGL_RESULT intel_event_set(struct intel_event *event)
+VK_RESULT intel_event_set(struct intel_event *event)
 {
     return event_write(event, 1);
 }
 
-XGL_RESULT intel_event_reset(struct intel_event *event)
+VK_RESULT intel_event_reset(struct intel_event *event)
 {
     return event_write(event, 0);
 }
 
-XGL_RESULT intel_event_get_status(struct intel_event *event)
+VK_RESULT intel_event_get_status(struct intel_event *event)
 {
-    XGL_RESULT ret;
+    VK_RESULT ret;
     uint32_t val;
 
     ret = event_read(event, &val);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
-    return (val) ? XGL_EVENT_SET : XGL_EVENT_RESET;
+    return (val) ? VK_EVENT_SET : VK_EVENT_RESET;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateEvent(
-    XGL_DEVICE                                  device,
-    const XGL_EVENT_CREATE_INFO*                pCreateInfo,
-    XGL_EVENT*                                  pEvent)
+ICD_EXPORT VK_RESULT VKAPI vkCreateEvent(
+    VK_DEVICE                                  device,
+    const VK_EVENT_CREATE_INFO*                pCreateInfo,
+    VK_EVENT*                                  pEvent)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -174,24 +174,24 @@
             (struct intel_event **) pEvent);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetEventStatus(
-    XGL_EVENT                                   event_)
+ICD_EXPORT VK_RESULT VKAPI vkGetEventStatus(
+    VK_EVENT                                   event_)
 {
     struct intel_event *event = intel_event(event_);
 
     return intel_event_get_status(event);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglSetEvent(
-    XGL_EVENT                                   event_)
+ICD_EXPORT VK_RESULT VKAPI vkSetEvent(
+    VK_EVENT                                   event_)
 {
     struct intel_event *event = intel_event(event_);
 
     return intel_event_set(event);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglResetEvent(
-    XGL_EVENT                                   event_)
+ICD_EXPORT VK_RESULT VKAPI vkResetEvent(
+    VK_EVENT                                   event_)
 {
     struct intel_event *event = intel_event(event_);
 
diff --git a/icd/intel/event.h b/icd/intel/event.h
index f1b21ec..8b891cf 100644
--- a/icd/intel/event.h
+++ b/icd/intel/event.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -37,7 +37,7 @@
     struct intel_obj obj;
 };
 
-static inline struct intel_event *intel_event(XGL_EVENT event)
+static inline struct intel_event *intel_event(VK_EVENT event)
 {
     return (struct intel_event *) event;
 }
@@ -47,13 +47,13 @@
     return (struct intel_event *) obj;
 }
 
-XGL_RESULT intel_event_create(struct intel_dev *dev,
-                              const XGL_EVENT_CREATE_INFO *info,
+VK_RESULT intel_event_create(struct intel_dev *dev,
+                              const VK_EVENT_CREATE_INFO *info,
                               struct intel_event **event_ret);
 void intel_event_destroy(struct intel_event *event);
 
-XGL_RESULT intel_event_set(struct intel_event *event);
-XGL_RESULT intel_event_reset(struct intel_event *event);
-XGL_RESULT intel_event_get_status(struct intel_event *event);
+VK_RESULT intel_event_set(struct intel_event *event);
+VK_RESULT intel_event_reset(struct intel_event *event);
+VK_RESULT intel_event_get_status(struct intel_event *event);
 
 #endif /* EVENT_H */
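
Usage sketch (illustrative, not part of this change): as the implementation above shows, an event is a dword in bound memory, so vkSetEvent()/vkResetEvent() are plain writes and vkGetEventStatus() reads the value back; an event with no memory bound fails with VK_ERROR_MEMORY_NOT_BOUND.

    /* Sketch: set, query, and reset an event whose memory is bound. */
    static VK_RESULT exercise_event(VK_EVENT event)
    {
        VK_RESULT res = vkSetEvent(event);      /* writes 1 */
        if (res != VK_SUCCESS)
            return res;                         /* e.g. no memory bound */

        res = vkGetEventStatus(event);          /* reads back the dword */
        if (res != VK_EVENT_SET)
            return res;

        return vkResetEvent(event);             /* writes 0 */
    }
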
diff --git a/icd/intel/fb.c b/icd/intel/fb.c
index 56d2dde..4338be8 100644
--- a/icd/intel/fb.c
+++ b/icd/intel/fb.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -36,27 +36,27 @@
     intel_fb_destroy(fb);
 }
 
-XGL_RESULT intel_fb_create(struct intel_dev *dev,
-                           const XGL_FRAMEBUFFER_CREATE_INFO *info,
+VK_RESULT intel_fb_create(struct intel_dev *dev,
+                           const VK_FRAMEBUFFER_CREATE_INFO *info,
                            struct intel_fb **fb_ret)
 {
     struct intel_fb *fb;
     uint32_t width, height, array_size, i;
 
     if (info->colorAttachmentCount > INTEL_MAX_RENDER_TARGETS)
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
 
     fb = (struct intel_fb *) intel_base_create(&dev->base.handle,
-            sizeof(*fb), dev->base.dbg, XGL_DBG_OBJECT_FRAMEBUFFER, info, 0);
+            sizeof(*fb), dev->base.dbg, VK_DBG_OBJECT_FRAMEBUFFER, info, 0);
     if (!fb)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     width = info->width;
     height = info->height;
     array_size = info->layers;
 
     for (i = 0; i < info->colorAttachmentCount; i++) {
-        const XGL_COLOR_ATTACHMENT_BIND_INFO *att =
+        const VK_COLOR_ATTACHMENT_BIND_INFO *att =
             &info->pColorAttachments[i];
         const struct intel_rt_view *rt = intel_rt_view(att->view);
         const struct intel_layout *layout = &rt->img->layout;
@@ -70,7 +70,7 @@
 
         if (rt->img->samples != info->sampleCount) {
             intel_fb_destroy(fb);
-            return XGL_ERROR_INVALID_VALUE;
+            return VK_ERROR_INVALID_VALUE;
         }
 
         fb->rt[i] = rt;
@@ -79,7 +79,7 @@
     fb->rt_count = info->colorAttachmentCount;
 
     if (info->pDepthStencilAttachment) {
-        const XGL_DEPTH_STENCIL_BIND_INFO *att =
+        const VK_DEPTH_STENCIL_BIND_INFO *att =
             info->pDepthStencilAttachment;
         const struct intel_ds_view *ds = intel_ds_view(att->view);
         const struct intel_layout *layout = &ds->img->layout;
@@ -93,14 +93,14 @@
 
         if (ds->img->samples != info->sampleCount) {
             intel_fb_destroy(fb);
-            return XGL_ERROR_INVALID_VALUE;
+            return VK_ERROR_INVALID_VALUE;
         }
 
         fb->ds = ds;
 
         switch (att->layout) {
-        case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
-        case XGL_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
             fb->optimal_ds = true;
             break;
         default:
@@ -123,7 +123,7 @@
 
     *fb_ret = fb;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_fb_destroy(struct intel_fb *fb)
@@ -138,29 +138,29 @@
     intel_render_pass_destroy(rp);
 }
 
-XGL_RESULT intel_render_pass_create(struct intel_dev *dev,
-                                    const XGL_RENDER_PASS_CREATE_INFO *info,
+VK_RESULT intel_render_pass_create(struct intel_dev *dev,
+                                    const VK_RENDER_PASS_CREATE_INFO *info,
                                     struct intel_render_pass **rp_ret)
 {
     struct intel_render_pass *rp;
     uint32_t i;
 
     rp = (struct intel_render_pass *) intel_base_create(&dev->base.handle,
-            sizeof(*rp), dev->base.dbg, XGL_DBG_OBJECT_RENDER_PASS, info, 0);
+            sizeof(*rp), dev->base.dbg, VK_DBG_OBJECT_RENDER_PASS, info, 0);
     if (!rp)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     rp->obj.destroy = render_pass_destroy;
 
     /* TODO add any clear color ops */
     for (i = 0; i < info->colorAttachmentCount; i++)
-        assert(info->pColorLoadOps[i] != XGL_ATTACHMENT_LOAD_OP_CLEAR);
-    assert(info->depthLoadOp != XGL_ATTACHMENT_LOAD_OP_CLEAR);
-    assert(info->stencilLoadOp != XGL_ATTACHMENT_LOAD_OP_CLEAR);
+        assert(info->pColorLoadOps[i] != VK_ATTACHMENT_LOAD_OP_CLEAR);
+    assert(info->depthLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR);
+    assert(info->stencilLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR);
 
     *rp_ret = rp;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_render_pass_destroy(struct intel_render_pass *rp)
@@ -168,10 +168,10 @@
     intel_base_destroy(&rp->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateFramebuffer(
-    XGL_DEVICE                                  device,
-    const XGL_FRAMEBUFFER_CREATE_INFO*          pCreateInfo,
-    XGL_FRAMEBUFFER*                            pFramebuffer)
+ICD_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(
+    VK_DEVICE                                  device,
+    const VK_FRAMEBUFFER_CREATE_INFO*          pCreateInfo,
+    VK_FRAMEBUFFER*                            pFramebuffer)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -179,10 +179,10 @@
             (struct intel_fb **) pFramebuffer);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateRenderPass(
-    XGL_DEVICE                                  device,
-    const XGL_RENDER_PASS_CREATE_INFO*          pCreateInfo,
-    XGL_RENDER_PASS*                            pRenderPass)
+ICD_EXPORT VK_RESULT VKAPI vkCreateRenderPass(
+    VK_DEVICE                                  device,
+    const VK_RENDER_PASS_CREATE_INFO*          pCreateInfo,
+    VK_RENDER_PASS*                            pRenderPass)
 {
     struct intel_dev *dev = intel_dev(device);
 
diff --git a/icd/intel/fb.h b/icd/intel/fb.h
index 873f49d..ba0afa3 100644
--- a/icd/intel/fb.h
+++ b/icd/intel/fb.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -48,7 +48,7 @@
     struct intel_obj obj;
 };
 
-static inline struct intel_fb *intel_fb(XGL_FRAMEBUFFER fb)
+static inline struct intel_fb *intel_fb(VK_FRAMEBUFFER fb)
 {
     return (struct intel_fb *) fb;
 }
@@ -58,7 +58,7 @@
     return (struct intel_fb *) obj;
 }
 
-static inline struct intel_render_pass *intel_render_pass(XGL_RENDER_PASS rp)
+static inline struct intel_render_pass *intel_render_pass(VK_RENDER_PASS rp)
 {
     return (struct intel_render_pass *) rp;
 }
@@ -68,13 +68,13 @@
     return (struct intel_render_pass *) obj;
 }
 
-XGL_RESULT intel_fb_create(struct intel_dev *dev,
-                           const XGL_FRAMEBUFFER_CREATE_INFO *pInfo,
+VK_RESULT intel_fb_create(struct intel_dev *dev,
+                           const VK_FRAMEBUFFER_CREATE_INFO *pInfo,
                            struct intel_fb **fb_ret);
 void intel_fb_destroy(struct intel_fb *fb);
 
-XGL_RESULT intel_render_pass_create(struct intel_dev *dev,
-                                    const XGL_RENDER_PASS_CREATE_INFO *pInfo,
+VK_RESULT intel_render_pass_create(struct intel_dev *dev,
+                                    const VK_RENDER_PASS_CREATE_INFO *pInfo,
                                     struct intel_render_pass **rp_ret);
 void intel_render_pass_destroy(struct intel_render_pass *rp);
 
diff --git a/icd/intel/fence.c b/icd/intel/fence.c
index e6aab3c..7cc57f1 100644
--- a/icd/intel/fence.c
+++ b/icd/intel/fence.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -38,20 +38,20 @@
     intel_fence_destroy(fence);
 }
 
-XGL_RESULT intel_fence_create(struct intel_dev *dev,
-                              const XGL_FENCE_CREATE_INFO *info,
+VK_RESULT intel_fence_create(struct intel_dev *dev,
+                              const VK_FENCE_CREATE_INFO *info,
                               struct intel_fence **fence_ret)
 {
     struct intel_fence *fence;
 
     fence = (struct intel_fence *) intel_base_create(&dev->base.handle,
-            sizeof(*fence), dev->base.dbg, XGL_DBG_OBJECT_FENCE, info, 0);
+            sizeof(*fence), dev->base.dbg, VK_DBG_OBJECT_FENCE, info, 0);
     if (!fence)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     if (dev->exts[INTEL_EXT_WSI_X11]) {
-        XGL_RESULT ret = intel_wsi_fence_init(fence);
-        if (ret != XGL_SUCCESS) {
+        VK_RESULT ret = intel_wsi_fence_init(fence);
+        if (ret != VK_SUCCESS) {
             intel_fence_destroy(fence);
             return ret;
         }
@@ -60,9 +60,9 @@
     fence->obj.destroy = fence_destroy;
 
     *fence_ret = fence;
-    fence->signaled = (info->flags & XGL_FENCE_CREATE_SIGNALED_BIT);
+    fence->signaled = (info->flags & VK_FENCE_CREATE_SIGNALED_BIT);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_fence_destroy(struct intel_fence *fence)
@@ -91,34 +91,34 @@
     fence->signaled = false;
 }
 
-XGL_RESULT intel_fence_wait(struct intel_fence *fence, int64_t timeout_ns)
+VK_RESULT intel_fence_wait(struct intel_fence *fence, int64_t timeout_ns)
 {
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     ret = intel_wsi_fence_wait(fence, timeout_ns);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     if (fence->signaled) {
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
     }
 
     if (fence->seqno_bo) {
         ret = (intel_bo_wait(fence->seqno_bo, timeout_ns)) ?
-            XGL_NOT_READY : XGL_SUCCESS;
-        if (ret == XGL_SUCCESS) {
+            VK_NOT_READY : VK_SUCCESS;
+        if (ret == VK_SUCCESS) {
             fence->signaled = true;
         }
         return ret;
     }
 
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateFence(
-    XGL_DEVICE                                  device,
-    const XGL_FENCE_CREATE_INFO*                pCreateInfo,
-    XGL_FENCE*                                  pFence)
+ICD_EXPORT VK_RESULT VKAPI vkCreateFence(
+    VK_DEVICE                                  device,
+    const VK_FENCE_CREATE_INFO*                pCreateInfo,
+    VK_FENCE*                                  pFence)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -126,46 +126,46 @@
             (struct intel_fence **) pFence);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetFenceStatus(
-    XGL_FENCE                                   fence_)
+ICD_EXPORT VK_RESULT VKAPI vkGetFenceStatus(
+    VK_FENCE                                   fence_)
 {
     struct intel_fence *fence = intel_fence(fence_);
 
     return intel_fence_wait(fence, 0);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWaitForFences(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkWaitForFences(
+    VK_DEVICE                                  device,
     uint32_t                                    fenceCount,
-    const XGL_FENCE*                            pFences,
+    const VK_FENCE*                            pFences,
     bool32_t                                    waitAll,
     uint64_t                                    timeout)
 {
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
     uint32_t i;
 
     for (i = 0; i < fenceCount; i++) {
         struct intel_fence *fence = intel_fence(pFences[i]);
         int64_t ns;
-        XGL_RESULT r;
+        VK_RESULT r;
 
         /* timeout in nanoseconds */
         ns = (timeout <= (uint64_t) INT64_MAX) ? (int64_t) timeout : -1;
         r = intel_fence_wait(fence, ns);
 
-        if (!waitAll && r == XGL_SUCCESS)
-            return XGL_SUCCESS;
+        if (!waitAll && r == VK_SUCCESS)
+            return VK_SUCCESS;
 
-        if (r != XGL_SUCCESS)
+        if (r != VK_SUCCESS)
             ret = r;
     }
 
     return ret;
 }
-ICD_EXPORT XGL_RESULT XGLAPI xglResetFences(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkResetFences(
+    VK_DEVICE                                  device,
     uint32_t                                    fenceCount,
-    XGL_FENCE*                                  pFences)
+    VK_FENCE*                                  pFences)
 {
     uint32_t i;
 
@@ -174,5 +174,5 @@
         fence->signaled = false;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
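
Usage sketch (illustrative, not part of the patch; 'fences' stands in for whatever list the application submitted): the timeout is in nanoseconds and is clamped to INT64_MAX by the driver, and with waitAll false the call returns VK_SUCCESS as soon as any fence in the list has signaled.

    /* Sketch: wait up to one second for any of 'count' fences. */
    static VK_RESULT wait_any_fence(VK_DEVICE device, uint32_t count,
                                    const VK_FENCE *fences)
    {
        return vkWaitForFences(device, count, fences,
                               (bool32_t) false,     /* waitAll */
                               1000000000ull);       /* 1 s in ns */
    }
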
diff --git a/icd/intel/fence.h b/icd/intel/fence.h
index 0ebc45e..947a227 100644
--- a/icd/intel/fence.h
+++ b/icd/intel/fence.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -43,7 +43,7 @@
     void *wsi_data;
 };
 
-static inline struct intel_fence *intel_fence(XGL_FENCE fence)
+static inline struct intel_fence *intel_fence(VK_FENCE fence)
 {
     return (struct intel_fence *) fence;
 }
@@ -53,12 +53,12 @@
     return (struct intel_fence *) obj;
 }
 
-XGL_RESULT intel_fence_create(struct intel_dev *dev,
-                              const XGL_FENCE_CREATE_INFO *info,
+VK_RESULT intel_fence_create(struct intel_dev *dev,
+                              const VK_FENCE_CREATE_INFO *info,
                               struct intel_fence **fence_ret);
 void intel_fence_destroy(struct intel_fence *fence);
 
-XGL_RESULT intel_fence_wait(struct intel_fence *fence, int64_t timeout_ns);
+VK_RESULT intel_fence_wait(struct intel_fence *fence, int64_t timeout_ns);
 
 void intel_fence_copy(struct intel_fence *fence,
                       const struct intel_fence *src);
diff --git a/icd/intel/format.c b/icd/intel/format.c
index 3019b44..178817f 100644
--- a/icd/intel/format.c
+++ b/icd/intel/format.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -377,171 +377,171 @@
 #undef CAP
 };
 
-static const int intel_color_mapping[XGL_NUM_FMT] = {
-    [XGL_FMT_R4G4_UNORM]           = 0,
-    [XGL_FMT_R4G4_USCALED]         = 0,
-    [XGL_FMT_R4G4B4A4_UNORM]       = 0,
-    [XGL_FMT_R4G4B4A4_USCALED]     = 0,
-    [XGL_FMT_R5G6B5_UNORM]         = 0,
-    [XGL_FMT_R5G6B5_USCALED]       = 0,
-    [XGL_FMT_R5G5B5A1_UNORM]       = 0,
-    [XGL_FMT_R5G5B5A1_USCALED]     = 0,
-    [XGL_FMT_R8_UNORM]             = GEN6_FORMAT_R8_UNORM,
-    [XGL_FMT_R8_SNORM]             = GEN6_FORMAT_R8_SNORM,
-    [XGL_FMT_R8_USCALED]           = GEN6_FORMAT_R8_USCALED,
-    [XGL_FMT_R8_SSCALED]           = GEN6_FORMAT_R8_SSCALED,
-    [XGL_FMT_R8_UINT]              = GEN6_FORMAT_R8_UINT,
-    [XGL_FMT_R8_SINT]              = GEN6_FORMAT_R8_SINT,
-    [XGL_FMT_R8_SRGB]              = 0,
-    [XGL_FMT_R8G8_UNORM]           = GEN6_FORMAT_R8G8_UNORM,
-    [XGL_FMT_R8G8_SNORM]           = GEN6_FORMAT_R8G8_SNORM,
-    [XGL_FMT_R8G8_USCALED]         = GEN6_FORMAT_R8G8_USCALED,
-    [XGL_FMT_R8G8_SSCALED]         = GEN6_FORMAT_R8G8_SSCALED,
-    [XGL_FMT_R8G8_UINT]            = GEN6_FORMAT_R8G8_UINT,
-    [XGL_FMT_R8G8_SINT]            = GEN6_FORMAT_R8G8_SINT,
-    [XGL_FMT_R8G8_SRGB]            = 0,
-    [XGL_FMT_R8G8B8_UNORM]         = GEN6_FORMAT_R8G8B8_UNORM,
-    [XGL_FMT_R8G8B8_SNORM]         = GEN6_FORMAT_R8G8B8_SNORM,
-    [XGL_FMT_R8G8B8_USCALED]       = GEN6_FORMAT_R8G8B8_USCALED,
-    [XGL_FMT_R8G8B8_SSCALED]       = GEN6_FORMAT_R8G8B8_SSCALED,
-    [XGL_FMT_R8G8B8_UINT]          = GEN6_FORMAT_R8G8B8_UINT,
-    [XGL_FMT_R8G8B8_SINT]          = GEN6_FORMAT_R8G8B8_SINT,
-    [XGL_FMT_R8G8B8_SRGB]          = GEN6_FORMAT_R8G8B8_UNORM_SRGB,
-    [XGL_FMT_R8G8B8A8_UNORM]       = GEN6_FORMAT_R8G8B8A8_UNORM,
-    [XGL_FMT_R8G8B8A8_SNORM]       = GEN6_FORMAT_R8G8B8A8_SNORM,
-    [XGL_FMT_R8G8B8A8_USCALED]     = GEN6_FORMAT_R8G8B8A8_USCALED,
-    [XGL_FMT_R8G8B8A8_SSCALED]     = GEN6_FORMAT_R8G8B8A8_SSCALED,
-    [XGL_FMT_R8G8B8A8_UINT]        = GEN6_FORMAT_R8G8B8A8_UINT,
-    [XGL_FMT_R8G8B8A8_SINT]        = GEN6_FORMAT_R8G8B8A8_SINT,
-    [XGL_FMT_R8G8B8A8_SRGB]        = GEN6_FORMAT_R8G8B8A8_UNORM_SRGB,
-    [XGL_FMT_R10G10B10A2_UNORM]    = GEN6_FORMAT_R10G10B10A2_UNORM,
-    [XGL_FMT_R10G10B10A2_SNORM]    = GEN6_FORMAT_R10G10B10A2_SNORM,
-    [XGL_FMT_R10G10B10A2_USCALED]  = GEN6_FORMAT_R10G10B10A2_USCALED,
-    [XGL_FMT_R10G10B10A2_SSCALED]  = GEN6_FORMAT_R10G10B10A2_SSCALED,
-    [XGL_FMT_R10G10B10A2_UINT]     = GEN6_FORMAT_R10G10B10A2_UINT,
-    [XGL_FMT_R10G10B10A2_SINT]     = GEN6_FORMAT_R10G10B10A2_SINT,
-    [XGL_FMT_R16_UNORM]            = GEN6_FORMAT_R16_UNORM,
-    [XGL_FMT_R16_SNORM]            = GEN6_FORMAT_R16_SNORM,
-    [XGL_FMT_R16_USCALED]          = GEN6_FORMAT_R16_USCALED,
-    [XGL_FMT_R16_SSCALED]          = GEN6_FORMAT_R16_SSCALED,
-    [XGL_FMT_R16_UINT]             = GEN6_FORMAT_R16_UINT,
-    [XGL_FMT_R16_SINT]             = GEN6_FORMAT_R16_SINT,
-    [XGL_FMT_R16_SFLOAT]           = GEN6_FORMAT_R16_FLOAT,
-    [XGL_FMT_R16G16_UNORM]         = GEN6_FORMAT_R16G16_UNORM,
-    [XGL_FMT_R16G16_SNORM]         = GEN6_FORMAT_R16G16_SNORM,
-    [XGL_FMT_R16G16_USCALED]       = GEN6_FORMAT_R16G16_USCALED,
-    [XGL_FMT_R16G16_SSCALED]       = GEN6_FORMAT_R16G16_SSCALED,
-    [XGL_FMT_R16G16_UINT]          = GEN6_FORMAT_R16G16_UINT,
-    [XGL_FMT_R16G16_SINT]          = GEN6_FORMAT_R16G16_SINT,
-    [XGL_FMT_R16G16_SFLOAT]        = GEN6_FORMAT_R16G16_FLOAT,
-    [XGL_FMT_R16G16B16_UNORM]      = GEN6_FORMAT_R16G16B16_UNORM,
-    [XGL_FMT_R16G16B16_SNORM]      = GEN6_FORMAT_R16G16B16_SNORM,
-    [XGL_FMT_R16G16B16_USCALED]    = GEN6_FORMAT_R16G16B16_USCALED,
-    [XGL_FMT_R16G16B16_SSCALED]    = GEN6_FORMAT_R16G16B16_SSCALED,
-    [XGL_FMT_R16G16B16_UINT]       = GEN6_FORMAT_R16G16B16_UINT,
-    [XGL_FMT_R16G16B16_SINT]       = GEN6_FORMAT_R16G16B16_SINT,
-    [XGL_FMT_R16G16B16_SFLOAT]     = 0,
-    [XGL_FMT_R16G16B16A16_UNORM]   = GEN6_FORMAT_R16G16B16A16_UNORM,
-    [XGL_FMT_R16G16B16A16_SNORM]   = GEN6_FORMAT_R16G16B16A16_SNORM,
-    [XGL_FMT_R16G16B16A16_USCALED] = GEN6_FORMAT_R16G16B16A16_USCALED,
-    [XGL_FMT_R16G16B16A16_SSCALED] = GEN6_FORMAT_R16G16B16A16_SSCALED,
-    [XGL_FMT_R16G16B16A16_UINT]    = GEN6_FORMAT_R16G16B16A16_UINT,
-    [XGL_FMT_R16G16B16A16_SINT]    = GEN6_FORMAT_R16G16B16A16_SINT,
-    [XGL_FMT_R16G16B16A16_SFLOAT]  = GEN6_FORMAT_R16G16B16A16_FLOAT,
-    [XGL_FMT_R32_UINT]             = GEN6_FORMAT_R32_UINT,
-    [XGL_FMT_R32_SINT]             = GEN6_FORMAT_R32_SINT,
-    [XGL_FMT_R32_SFLOAT]           = GEN6_FORMAT_R32_FLOAT,
-    [XGL_FMT_R32G32_UINT]          = GEN6_FORMAT_R32G32_UINT,
-    [XGL_FMT_R32G32_SINT]          = GEN6_FORMAT_R32G32_SINT,
-    [XGL_FMT_R32G32_SFLOAT]        = GEN6_FORMAT_R32G32_FLOAT,
-    [XGL_FMT_R32G32B32_UINT]       = GEN6_FORMAT_R32G32B32_UINT,
-    [XGL_FMT_R32G32B32_SINT]       = GEN6_FORMAT_R32G32B32_SINT,
-    [XGL_FMT_R32G32B32_SFLOAT]     = GEN6_FORMAT_R32G32B32_FLOAT,
-    [XGL_FMT_R32G32B32A32_UINT]    = GEN6_FORMAT_R32G32B32A32_UINT,
-    [XGL_FMT_R32G32B32A32_SINT]    = GEN6_FORMAT_R32G32B32A32_SINT,
-    [XGL_FMT_R32G32B32A32_SFLOAT]  = GEN6_FORMAT_R32G32B32A32_FLOAT,
-    [XGL_FMT_R64_SFLOAT]           = GEN6_FORMAT_R64_FLOAT,
-    [XGL_FMT_R64G64_SFLOAT]        = GEN6_FORMAT_R64G64_FLOAT,
-    [XGL_FMT_R64G64B64_SFLOAT]     = GEN6_FORMAT_R64G64B64_FLOAT,
-    [XGL_FMT_R64G64B64A64_SFLOAT]  = GEN6_FORMAT_R64G64B64A64_FLOAT,
-    [XGL_FMT_R11G11B10_UFLOAT]     = GEN6_FORMAT_R11G11B10_FLOAT,
-    [XGL_FMT_R9G9B9E5_UFLOAT]      = GEN6_FORMAT_R9G9B9E5_SHAREDEXP,
-    [XGL_FMT_BC1_RGB_UNORM]        = GEN6_FORMAT_BC1_UNORM,
-    [XGL_FMT_BC1_RGB_SRGB]         = GEN6_FORMAT_BC1_UNORM_SRGB,
-    [XGL_FMT_BC2_UNORM]            = GEN6_FORMAT_BC2_UNORM,
-    [XGL_FMT_BC2_SRGB]             = GEN6_FORMAT_BC2_UNORM_SRGB,
-    [XGL_FMT_BC3_UNORM]            = GEN6_FORMAT_BC3_UNORM,
-    [XGL_FMT_BC3_SRGB]             = GEN6_FORMAT_BC3_UNORM_SRGB,
-    [XGL_FMT_BC4_UNORM]            = GEN6_FORMAT_BC4_UNORM,
-    [XGL_FMT_BC4_SNORM]            = GEN6_FORMAT_BC4_SNORM,
-    [XGL_FMT_BC5_UNORM]            = GEN6_FORMAT_BC5_UNORM,
-    [XGL_FMT_BC5_SNORM]            = GEN6_FORMAT_BC5_SNORM,
-    [XGL_FMT_BC6H_UFLOAT]          = GEN6_FORMAT_BC6H_UF16,
-    [XGL_FMT_BC6H_SFLOAT]          = GEN6_FORMAT_BC6H_SF16,
-    [XGL_FMT_BC7_UNORM]            = GEN6_FORMAT_BC7_UNORM,
-    [XGL_FMT_BC7_SRGB]             = GEN6_FORMAT_BC7_UNORM_SRGB,
+static const int intel_color_mapping[VK_NUM_FMT] = {
+    [VK_FMT_R4G4_UNORM]           = 0,
+    [VK_FMT_R4G4_USCALED]         = 0,
+    [VK_FMT_R4G4B4A4_UNORM]       = 0,
+    [VK_FMT_R4G4B4A4_USCALED]     = 0,
+    [VK_FMT_R5G6B5_UNORM]         = 0,
+    [VK_FMT_R5G6B5_USCALED]       = 0,
+    [VK_FMT_R5G5B5A1_UNORM]       = 0,
+    [VK_FMT_R5G5B5A1_USCALED]     = 0,
+    [VK_FMT_R8_UNORM]             = GEN6_FORMAT_R8_UNORM,
+    [VK_FMT_R8_SNORM]             = GEN6_FORMAT_R8_SNORM,
+    [VK_FMT_R8_USCALED]           = GEN6_FORMAT_R8_USCALED,
+    [VK_FMT_R8_SSCALED]           = GEN6_FORMAT_R8_SSCALED,
+    [VK_FMT_R8_UINT]              = GEN6_FORMAT_R8_UINT,
+    [VK_FMT_R8_SINT]              = GEN6_FORMAT_R8_SINT,
+    [VK_FMT_R8_SRGB]              = 0,
+    [VK_FMT_R8G8_UNORM]           = GEN6_FORMAT_R8G8_UNORM,
+    [VK_FMT_R8G8_SNORM]           = GEN6_FORMAT_R8G8_SNORM,
+    [VK_FMT_R8G8_USCALED]         = GEN6_FORMAT_R8G8_USCALED,
+    [VK_FMT_R8G8_SSCALED]         = GEN6_FORMAT_R8G8_SSCALED,
+    [VK_FMT_R8G8_UINT]            = GEN6_FORMAT_R8G8_UINT,
+    [VK_FMT_R8G8_SINT]            = GEN6_FORMAT_R8G8_SINT,
+    [VK_FMT_R8G8_SRGB]            = 0,
+    [VK_FMT_R8G8B8_UNORM]         = GEN6_FORMAT_R8G8B8_UNORM,
+    [VK_FMT_R8G8B8_SNORM]         = GEN6_FORMAT_R8G8B8_SNORM,
+    [VK_FMT_R8G8B8_USCALED]       = GEN6_FORMAT_R8G8B8_USCALED,
+    [VK_FMT_R8G8B8_SSCALED]       = GEN6_FORMAT_R8G8B8_SSCALED,
+    [VK_FMT_R8G8B8_UINT]          = GEN6_FORMAT_R8G8B8_UINT,
+    [VK_FMT_R8G8B8_SINT]          = GEN6_FORMAT_R8G8B8_SINT,
+    [VK_FMT_R8G8B8_SRGB]          = GEN6_FORMAT_R8G8B8_UNORM_SRGB,
+    [VK_FMT_R8G8B8A8_UNORM]       = GEN6_FORMAT_R8G8B8A8_UNORM,
+    [VK_FMT_R8G8B8A8_SNORM]       = GEN6_FORMAT_R8G8B8A8_SNORM,
+    [VK_FMT_R8G8B8A8_USCALED]     = GEN6_FORMAT_R8G8B8A8_USCALED,
+    [VK_FMT_R8G8B8A8_SSCALED]     = GEN6_FORMAT_R8G8B8A8_SSCALED,
+    [VK_FMT_R8G8B8A8_UINT]        = GEN6_FORMAT_R8G8B8A8_UINT,
+    [VK_FMT_R8G8B8A8_SINT]        = GEN6_FORMAT_R8G8B8A8_SINT,
+    [VK_FMT_R8G8B8A8_SRGB]        = GEN6_FORMAT_R8G8B8A8_UNORM_SRGB,
+    [VK_FMT_R10G10B10A2_UNORM]    = GEN6_FORMAT_R10G10B10A2_UNORM,
+    [VK_FMT_R10G10B10A2_SNORM]    = GEN6_FORMAT_R10G10B10A2_SNORM,
+    [VK_FMT_R10G10B10A2_USCALED]  = GEN6_FORMAT_R10G10B10A2_USCALED,
+    [VK_FMT_R10G10B10A2_SSCALED]  = GEN6_FORMAT_R10G10B10A2_SSCALED,
+    [VK_FMT_R10G10B10A2_UINT]     = GEN6_FORMAT_R10G10B10A2_UINT,
+    [VK_FMT_R10G10B10A2_SINT]     = GEN6_FORMAT_R10G10B10A2_SINT,
+    [VK_FMT_R16_UNORM]            = GEN6_FORMAT_R16_UNORM,
+    [VK_FMT_R16_SNORM]            = GEN6_FORMAT_R16_SNORM,
+    [VK_FMT_R16_USCALED]          = GEN6_FORMAT_R16_USCALED,
+    [VK_FMT_R16_SSCALED]          = GEN6_FORMAT_R16_SSCALED,
+    [VK_FMT_R16_UINT]             = GEN6_FORMAT_R16_UINT,
+    [VK_FMT_R16_SINT]             = GEN6_FORMAT_R16_SINT,
+    [VK_FMT_R16_SFLOAT]           = GEN6_FORMAT_R16_FLOAT,
+    [VK_FMT_R16G16_UNORM]         = GEN6_FORMAT_R16G16_UNORM,
+    [VK_FMT_R16G16_SNORM]         = GEN6_FORMAT_R16G16_SNORM,
+    [VK_FMT_R16G16_USCALED]       = GEN6_FORMAT_R16G16_USCALED,
+    [VK_FMT_R16G16_SSCALED]       = GEN6_FORMAT_R16G16_SSCALED,
+    [VK_FMT_R16G16_UINT]          = GEN6_FORMAT_R16G16_UINT,
+    [VK_FMT_R16G16_SINT]          = GEN6_FORMAT_R16G16_SINT,
+    [VK_FMT_R16G16_SFLOAT]        = GEN6_FORMAT_R16G16_FLOAT,
+    [VK_FMT_R16G16B16_UNORM]      = GEN6_FORMAT_R16G16B16_UNORM,
+    [VK_FMT_R16G16B16_SNORM]      = GEN6_FORMAT_R16G16B16_SNORM,
+    [VK_FMT_R16G16B16_USCALED]    = GEN6_FORMAT_R16G16B16_USCALED,
+    [VK_FMT_R16G16B16_SSCALED]    = GEN6_FORMAT_R16G16B16_SSCALED,
+    [VK_FMT_R16G16B16_UINT]       = GEN6_FORMAT_R16G16B16_UINT,
+    [VK_FMT_R16G16B16_SINT]       = GEN6_FORMAT_R16G16B16_SINT,
+    [VK_FMT_R16G16B16_SFLOAT]     = 0,
+    [VK_FMT_R16G16B16A16_UNORM]   = GEN6_FORMAT_R16G16B16A16_UNORM,
+    [VK_FMT_R16G16B16A16_SNORM]   = GEN6_FORMAT_R16G16B16A16_SNORM,
+    [VK_FMT_R16G16B16A16_USCALED] = GEN6_FORMAT_R16G16B16A16_USCALED,
+    [VK_FMT_R16G16B16A16_SSCALED] = GEN6_FORMAT_R16G16B16A16_SSCALED,
+    [VK_FMT_R16G16B16A16_UINT]    = GEN6_FORMAT_R16G16B16A16_UINT,
+    [VK_FMT_R16G16B16A16_SINT]    = GEN6_FORMAT_R16G16B16A16_SINT,
+    [VK_FMT_R16G16B16A16_SFLOAT]  = GEN6_FORMAT_R16G16B16A16_FLOAT,
+    [VK_FMT_R32_UINT]             = GEN6_FORMAT_R32_UINT,
+    [VK_FMT_R32_SINT]             = GEN6_FORMAT_R32_SINT,
+    [VK_FMT_R32_SFLOAT]           = GEN6_FORMAT_R32_FLOAT,
+    [VK_FMT_R32G32_UINT]          = GEN6_FORMAT_R32G32_UINT,
+    [VK_FMT_R32G32_SINT]          = GEN6_FORMAT_R32G32_SINT,
+    [VK_FMT_R32G32_SFLOAT]        = GEN6_FORMAT_R32G32_FLOAT,
+    [VK_FMT_R32G32B32_UINT]       = GEN6_FORMAT_R32G32B32_UINT,
+    [VK_FMT_R32G32B32_SINT]       = GEN6_FORMAT_R32G32B32_SINT,
+    [VK_FMT_R32G32B32_SFLOAT]     = GEN6_FORMAT_R32G32B32_FLOAT,
+    [VK_FMT_R32G32B32A32_UINT]    = GEN6_FORMAT_R32G32B32A32_UINT,
+    [VK_FMT_R32G32B32A32_SINT]    = GEN6_FORMAT_R32G32B32A32_SINT,
+    [VK_FMT_R32G32B32A32_SFLOAT]  = GEN6_FORMAT_R32G32B32A32_FLOAT,
+    [VK_FMT_R64_SFLOAT]           = GEN6_FORMAT_R64_FLOAT,
+    [VK_FMT_R64G64_SFLOAT]        = GEN6_FORMAT_R64G64_FLOAT,
+    [VK_FMT_R64G64B64_SFLOAT]     = GEN6_FORMAT_R64G64B64_FLOAT,
+    [VK_FMT_R64G64B64A64_SFLOAT]  = GEN6_FORMAT_R64G64B64A64_FLOAT,
+    [VK_FMT_R11G11B10_UFLOAT]     = GEN6_FORMAT_R11G11B10_FLOAT,
+    [VK_FMT_R9G9B9E5_UFLOAT]      = GEN6_FORMAT_R9G9B9E5_SHAREDEXP,
+    [VK_FMT_BC1_RGB_UNORM]        = GEN6_FORMAT_BC1_UNORM,
+    [VK_FMT_BC1_RGB_SRGB]         = GEN6_FORMAT_BC1_UNORM_SRGB,
+    [VK_FMT_BC2_UNORM]            = GEN6_FORMAT_BC2_UNORM,
+    [VK_FMT_BC2_SRGB]             = GEN6_FORMAT_BC2_UNORM_SRGB,
+    [VK_FMT_BC3_UNORM]            = GEN6_FORMAT_BC3_UNORM,
+    [VK_FMT_BC3_SRGB]             = GEN6_FORMAT_BC3_UNORM_SRGB,
+    [VK_FMT_BC4_UNORM]            = GEN6_FORMAT_BC4_UNORM,
+    [VK_FMT_BC4_SNORM]            = GEN6_FORMAT_BC4_SNORM,
+    [VK_FMT_BC5_UNORM]            = GEN6_FORMAT_BC5_UNORM,
+    [VK_FMT_BC5_SNORM]            = GEN6_FORMAT_BC5_SNORM,
+    [VK_FMT_BC6H_UFLOAT]          = GEN6_FORMAT_BC6H_UF16,
+    [VK_FMT_BC6H_SFLOAT]          = GEN6_FORMAT_BC6H_SF16,
+    [VK_FMT_BC7_UNORM]            = GEN6_FORMAT_BC7_UNORM,
+    [VK_FMT_BC7_SRGB]             = GEN6_FORMAT_BC7_UNORM_SRGB,
     /* TODO: Implement for remaining compressed formats. */
-    [XGL_FMT_ETC2_R8G8B8_UNORM]    = 0,
-    [XGL_FMT_ETC2_R8G8B8A1_UNORM]  = 0,
-    [XGL_FMT_ETC2_R8G8B8A8_UNORM]  = 0,
-    [XGL_FMT_EAC_R11_UNORM]        = 0,
-    [XGL_FMT_EAC_R11_SNORM]        = 0,
-    [XGL_FMT_EAC_R11G11_UNORM]     = 0,
-    [XGL_FMT_EAC_R11G11_SNORM]     = 0,
-    [XGL_FMT_ASTC_4x4_UNORM]       = 0,
-    [XGL_FMT_ASTC_4x4_SRGB]        = 0,
-    [XGL_FMT_ASTC_5x4_UNORM]       = 0,
-    [XGL_FMT_ASTC_5x4_SRGB]        = 0,
-    [XGL_FMT_ASTC_5x5_UNORM]       = 0,
-    [XGL_FMT_ASTC_5x5_SRGB]        = 0,
-    [XGL_FMT_ASTC_6x5_UNORM]       = 0,
-    [XGL_FMT_ASTC_6x5_SRGB]        = 0,
-    [XGL_FMT_ASTC_6x6_UNORM]       = 0,
-    [XGL_FMT_ASTC_6x6_SRGB]        = 0,
-    [XGL_FMT_ASTC_8x5_UNORM]       = 0,
-    [XGL_FMT_ASTC_8x5_SRGB]        = 0,
-    [XGL_FMT_ASTC_8x6_UNORM]       = 0,
-    [XGL_FMT_ASTC_8x6_SRGB]        = 0,
-    [XGL_FMT_ASTC_8x8_UNORM]       = 0,
-    [XGL_FMT_ASTC_8x8_SRGB]        = 0,
-    [XGL_FMT_ASTC_10x5_UNORM]      = 0,
-    [XGL_FMT_ASTC_10x5_SRGB]       = 0,
-    [XGL_FMT_ASTC_10x6_UNORM]      = 0,
-    [XGL_FMT_ASTC_10x6_SRGB]       = 0,
-    [XGL_FMT_ASTC_10x8_UNORM]      = 0,
-    [XGL_FMT_ASTC_10x8_SRGB]       = 0,
-    [XGL_FMT_ASTC_10x10_UNORM]     = 0,
-    [XGL_FMT_ASTC_10x10_SRGB]      = 0,
-    [XGL_FMT_ASTC_12x10_UNORM]     = 0,
-    [XGL_FMT_ASTC_12x10_SRGB]      = 0,
-    [XGL_FMT_ASTC_12x12_UNORM]     = 0,
-    [XGL_FMT_ASTC_12x12_SRGB]      = 0,
-    [XGL_FMT_B5G6R5_UNORM]         = GEN6_FORMAT_B5G6R5_UNORM,
-    [XGL_FMT_B5G6R5_USCALED]       = 0,
-    [XGL_FMT_B8G8R8_UNORM]         = 0,
-    [XGL_FMT_B8G8R8_SNORM]         = 0,
-    [XGL_FMT_B8G8R8_USCALED]       = 0,
-    [XGL_FMT_B8G8R8_SSCALED]       = 0,
-    [XGL_FMT_B8G8R8_UINT]          = 0,
-    [XGL_FMT_B8G8R8_SINT]          = 0,
-    [XGL_FMT_B8G8R8_SRGB]          = GEN6_FORMAT_B5G6R5_UNORM_SRGB,
-    [XGL_FMT_B8G8R8A8_UNORM]       = GEN6_FORMAT_B8G8R8A8_UNORM,
-    [XGL_FMT_B8G8R8A8_SNORM]       = 0,
-    [XGL_FMT_B8G8R8A8_USCALED]     = 0,
-    [XGL_FMT_B8G8R8A8_SSCALED]     = 0,
-    [XGL_FMT_B8G8R8A8_UINT]        = 0,
-    [XGL_FMT_B8G8R8A8_SINT]        = 0,
-    [XGL_FMT_B8G8R8A8_SRGB]        = GEN6_FORMAT_B8G8R8A8_UNORM_SRGB,
-    [XGL_FMT_B10G10R10A2_UNORM]    = GEN6_FORMAT_B10G10R10A2_UNORM,
-    [XGL_FMT_B10G10R10A2_SNORM]    = GEN6_FORMAT_B10G10R10A2_SNORM,
-    [XGL_FMT_B10G10R10A2_USCALED]  = GEN6_FORMAT_B10G10R10A2_USCALED,
-    [XGL_FMT_B10G10R10A2_SSCALED]  = GEN6_FORMAT_B10G10R10A2_SSCALED,
-    [XGL_FMT_B10G10R10A2_UINT]     = GEN6_FORMAT_B10G10R10A2_UINT,
-    [XGL_FMT_B10G10R10A2_SINT]     = GEN6_FORMAT_B10G10R10A2_SINT
+    [VK_FMT_ETC2_R8G8B8_UNORM]    = 0,
+    [VK_FMT_ETC2_R8G8B8A1_UNORM]  = 0,
+    [VK_FMT_ETC2_R8G8B8A8_UNORM]  = 0,
+    [VK_FMT_EAC_R11_UNORM]        = 0,
+    [VK_FMT_EAC_R11_SNORM]        = 0,
+    [VK_FMT_EAC_R11G11_UNORM]     = 0,
+    [VK_FMT_EAC_R11G11_SNORM]     = 0,
+    [VK_FMT_ASTC_4x4_UNORM]       = 0,
+    [VK_FMT_ASTC_4x4_SRGB]        = 0,
+    [VK_FMT_ASTC_5x4_UNORM]       = 0,
+    [VK_FMT_ASTC_5x4_SRGB]        = 0,
+    [VK_FMT_ASTC_5x5_UNORM]       = 0,
+    [VK_FMT_ASTC_5x5_SRGB]        = 0,
+    [VK_FMT_ASTC_6x5_UNORM]       = 0,
+    [VK_FMT_ASTC_6x5_SRGB]        = 0,
+    [VK_FMT_ASTC_6x6_UNORM]       = 0,
+    [VK_FMT_ASTC_6x6_SRGB]        = 0,
+    [VK_FMT_ASTC_8x5_UNORM]       = 0,
+    [VK_FMT_ASTC_8x5_SRGB]        = 0,
+    [VK_FMT_ASTC_8x6_UNORM]       = 0,
+    [VK_FMT_ASTC_8x6_SRGB]        = 0,
+    [VK_FMT_ASTC_8x8_UNORM]       = 0,
+    [VK_FMT_ASTC_8x8_SRGB]        = 0,
+    [VK_FMT_ASTC_10x5_UNORM]      = 0,
+    [VK_FMT_ASTC_10x5_SRGB]       = 0,
+    [VK_FMT_ASTC_10x6_UNORM]      = 0,
+    [VK_FMT_ASTC_10x6_SRGB]       = 0,
+    [VK_FMT_ASTC_10x8_UNORM]      = 0,
+    [VK_FMT_ASTC_10x8_SRGB]       = 0,
+    [VK_FMT_ASTC_10x10_UNORM]     = 0,
+    [VK_FMT_ASTC_10x10_SRGB]      = 0,
+    [VK_FMT_ASTC_12x10_UNORM]     = 0,
+    [VK_FMT_ASTC_12x10_SRGB]      = 0,
+    [VK_FMT_ASTC_12x12_UNORM]     = 0,
+    [VK_FMT_ASTC_12x12_SRGB]      = 0,
+    [VK_FMT_B5G6R5_UNORM]         = GEN6_FORMAT_B5G6R5_UNORM,
+    [VK_FMT_B5G6R5_USCALED]       = 0,
+    [VK_FMT_B8G8R8_UNORM]         = 0,
+    [VK_FMT_B8G8R8_SNORM]         = 0,
+    [VK_FMT_B8G8R8_USCALED]       = 0,
+    [VK_FMT_B8G8R8_SSCALED]       = 0,
+    [VK_FMT_B8G8R8_UINT]          = 0,
+    [VK_FMT_B8G8R8_SINT]          = 0,
+    [VK_FMT_B8G8R8_SRGB]          = GEN6_FORMAT_B5G6R5_UNORM_SRGB,
+    [VK_FMT_B8G8R8A8_UNORM]       = GEN6_FORMAT_B8G8R8A8_UNORM,
+    [VK_FMT_B8G8R8A8_SNORM]       = 0,
+    [VK_FMT_B8G8R8A8_USCALED]     = 0,
+    [VK_FMT_B8G8R8A8_SSCALED]     = 0,
+    [VK_FMT_B8G8R8A8_UINT]        = 0,
+    [VK_FMT_B8G8R8A8_SINT]        = 0,
+    [VK_FMT_B8G8R8A8_SRGB]        = GEN6_FORMAT_B8G8R8A8_UNORM_SRGB,
+    [VK_FMT_B10G10R10A2_UNORM]    = GEN6_FORMAT_B10G10R10A2_UNORM,
+    [VK_FMT_B10G10R10A2_SNORM]    = GEN6_FORMAT_B10G10R10A2_SNORM,
+    [VK_FMT_B10G10R10A2_USCALED]  = GEN6_FORMAT_B10G10R10A2_USCALED,
+    [VK_FMT_B10G10R10A2_SSCALED]  = GEN6_FORMAT_B10G10R10A2_SSCALED,
+    [VK_FMT_B10G10R10A2_UINT]     = GEN6_FORMAT_B10G10R10A2_UINT,
+    [VK_FMT_B10G10R10A2_SINT]     = GEN6_FORMAT_B10G10R10A2_SINT
 };
 
 int intel_format_translate_color(const struct intel_gpu *gpu,
-                                 XGL_FORMAT format)
+                                 VK_FORMAT format)
 {
     int fmt;
 
@@ -552,7 +552,7 @@
     /* TODO: Implement for remaining compressed formats. */
 
     /* GEN6_FORMAT_R32G32B32A32_FLOAT happens to be 0 */
-    if (format == XGL_FMT_R32G32B32A32_SFLOAT)
+    if (format == VK_FMT_R32G32B32A32_SFLOAT)
         assert(fmt == 0);
     else if (!fmt)
         fmt = -1;
@@ -560,14 +560,14 @@
     return fmt;
 }
 
-static XGL_FLAGS intel_format_get_color_features(const struct intel_dev *dev,
-                                                 XGL_FORMAT format)
+static VK_FLAGS intel_format_get_color_features(const struct intel_dev *dev,
+                                                 VK_FORMAT format)
 {
     const int fmt = intel_format_translate_color(dev->gpu, format);
     const struct intel_vf_cap *vf;
     const struct intel_sampler_cap *sampler;
     const struct intel_dp_cap *dp;
-    XGL_FLAGS features;
+    VK_FLAGS features;
 
     if (fmt < 0)
         return 0;
@@ -577,7 +577,7 @@
     vf = (fmt < ARRAY_SIZE(intel_vf_caps)) ?  &intel_vf_caps[fmt] : NULL;
     dp = (fmt < ARRAY_SIZE(intel_dp_caps)) ?  &intel_dp_caps[fmt] : NULL;
 
-    features = XGL_FORMAT_MEMORY_SHADER_ACCESS_BIT;
+    features = VK_FORMAT_MEMORY_SHADER_ACCESS_BIT;
 
 #define TEST(dev, func, cap) ((func) && (func)->cap && \
         intel_gpu_gen((dev)->gpu) >= (func)->cap)
@@ -588,21 +588,21 @@
     if (TEST(dev, sampler, sampling)) {
         if (icd_format_is_int(format) ||
             TEST(dev, sampler, filtering))
-            features |= XGL_FORMAT_IMAGE_SHADER_READ_BIT;
+            features |= VK_FORMAT_IMAGE_SHADER_READ_BIT;
     }
 
     if (TEST(dev, dp, typed_write))
-        features |= XGL_FORMAT_IMAGE_SHADER_WRITE_BIT;
+        features |= VK_FORMAT_IMAGE_SHADER_WRITE_BIT;
 
     if (TEST(dev, dp, rt_write)) {
-        features |= XGL_FORMAT_COLOR_ATTACHMENT_WRITE_BIT;
+        features |= VK_FORMAT_COLOR_ATTACHMENT_WRITE_BIT;
 
         if (TEST(dev, dp, rt_write_blending))
-            features |= XGL_FORMAT_COLOR_ATTACHMENT_BLEND_BIT;
+            features |= VK_FORMAT_COLOR_ATTACHMENT_BLEND_BIT;
 
-        if (features & XGL_FORMAT_IMAGE_SHADER_READ_BIT) {
-            features |= XGL_FORMAT_IMAGE_COPY_BIT |
-                        XGL_FORMAT_CONVERSION_BIT;
+        if (features & VK_FORMAT_IMAGE_SHADER_READ_BIT) {
+            features |= VK_FORMAT_IMAGE_COPY_BIT |
+                        VK_FORMAT_CONVERSION_BIT;
         }
     }
 #undef TEST
@@ -610,27 +610,27 @@
     return features;
 }
 
-static XGL_FLAGS intel_format_get_ds_features(const struct intel_dev *dev,
-                                              XGL_FORMAT format)
+static VK_FLAGS intel_format_get_ds_features(const struct intel_dev *dev,
+                                              VK_FORMAT format)
 {
-    XGL_FLAGS features;
+    VK_FLAGS features;
 
     assert(icd_format_is_ds(format));
 
     switch (format) {
-    case XGL_FMT_S8_UINT:
-        features = XGL_FORMAT_STENCIL_ATTACHMENT_BIT;;
+    case VK_FMT_S8_UINT:
+        features = VK_FORMAT_STENCIL_ATTACHMENT_BIT;
         break;
-    case XGL_FMT_D16_UNORM:
-    case XGL_FMT_D24_UNORM:
-    case XGL_FMT_D32_SFLOAT:
-        features = XGL_FORMAT_DEPTH_ATTACHMENT_BIT;
+    case VK_FMT_D16_UNORM:
+    case VK_FMT_D24_UNORM:
+    case VK_FMT_D32_SFLOAT:
+        features = VK_FORMAT_DEPTH_ATTACHMENT_BIT;
         break;
-    case XGL_FMT_D16_UNORM_S8_UINT:
-    case XGL_FMT_D24_UNORM_S8_UINT:
-    case XGL_FMT_D32_SFLOAT_S8_UINT:
-        features = XGL_FORMAT_DEPTH_ATTACHMENT_BIT |
-                   XGL_FORMAT_STENCIL_ATTACHMENT_BIT;
+    case VK_FMT_D16_UNORM_S8_UINT:
+    case VK_FMT_D24_UNORM_S8_UINT:
+    case VK_FMT_D32_SFLOAT_S8_UINT:
+        features = VK_FORMAT_DEPTH_ATTACHMENT_BIT |
+                   VK_FORMAT_STENCIL_ATTACHMENT_BIT;
         break;
     default:
         features = 0;
@@ -640,16 +640,16 @@
     return features;
 }
 
-static XGL_FLAGS intel_format_get_raw_features(const struct intel_dev *dev,
-                                               XGL_FORMAT format)
+static VK_FLAGS intel_format_get_raw_features(const struct intel_dev *dev,
+                                               VK_FORMAT format)
 {
-    return (format == XGL_FMT_UNDEFINED) ?
-        XGL_FORMAT_MEMORY_SHADER_ACCESS_BIT : 0;
+    return (format == VK_FMT_UNDEFINED) ?
+        VK_FORMAT_MEMORY_SHADER_ACCESS_BIT : 0;
 }
 
 static void intel_format_get_props(const struct intel_dev *dev,
-                                   XGL_FORMAT format,
-                                   XGL_FORMAT_PROPERTIES *props)
+                                   VK_FORMAT format,
+                                   VK_FORMAT_PROPERTIES *props)
 {
     if (icd_format_is_undef(format)) {
         props->linearTilingFeatures =
@@ -669,25 +669,25 @@
     }
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetFormatInfo(
-    XGL_DEVICE                                  device,
-    XGL_FORMAT                                  format,
-    XGL_FORMAT_INFO_TYPE                        infoType,
+ICD_EXPORT VK_RESULT VKAPI vkGetFormatInfo(
+    VK_DEVICE                                  device,
+    VK_FORMAT                                  format,
+    VK_FORMAT_INFO_TYPE                        infoType,
     size_t*                                     pDataSize,
     void*                                       pData)
 {
     const struct intel_dev *dev = intel_dev(device);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (infoType) {
-    case XGL_INFO_TYPE_FORMAT_PROPERTIES:
-        *pDataSize = sizeof(XGL_FORMAT_PROPERTIES);
+    case VK_INFO_TYPE_FORMAT_PROPERTIES:
+        *pDataSize = sizeof(VK_FORMAT_PROPERTIES);
         if (pData == NULL)
             return ret;
         intel_format_get_props(dev, format, pData);
         break;
     default:
-        ret = XGL_ERROR_INVALID_VALUE;
+        ret = VK_ERROR_INVALID_VALUE;
         break;
     }
 
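Usage sketch (illustrative, not part of the patch): vkGetFormatInfo() follows the same two-call convention as the other info queries above; with pData NULL only *pDataSize is written, and a second call fills in the properties.

    /* Sketch: size query first, then the actual properties. */
    static VK_RESULT query_format_props(VK_DEVICE device, VK_FORMAT format,
                                        VK_FORMAT_PROPERTIES *props)
    {
        size_t size = 0;
        VK_RESULT res = vkGetFormatInfo(device, format,
                VK_INFO_TYPE_FORMAT_PROPERTIES, &size, NULL);

        if (res != VK_SUCCESS || size != sizeof(*props))
            return VK_ERROR_INVALID_VALUE;

        return vkGetFormatInfo(device, format,
                VK_INFO_TYPE_FORMAT_PROPERTIES, &size, props);
    }
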
diff --git a/icd/intel/format.h b/icd/intel/format.h
index 1991394..7234503 100644
--- a/icd/intel/format.h
+++ b/icd/intel/format.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -33,17 +33,17 @@
 struct intel_gpu;
 
 static inline bool intel_format_has_depth(const struct intel_gpu *gpu,
-                                          XGL_FORMAT format)
+                                          VK_FORMAT format)
 {
     bool has_depth = false;
 
     switch (format) {
-    case XGL_FMT_D16_UNORM:
-    case XGL_FMT_D24_UNORM:
-    case XGL_FMT_D32_SFLOAT:
-    /* XGL_FMT_D16_UNORM_S8_UINT is unsupported */
-    case XGL_FMT_D24_UNORM_S8_UINT:
-    case XGL_FMT_D32_SFLOAT_S8_UINT:
+    case VK_FMT_D16_UNORM:
+    case VK_FMT_D24_UNORM:
+    case VK_FMT_D32_SFLOAT:
+    /* VK_FMT_D16_UNORM_S8_UINT is unsupported */
+    case VK_FMT_D24_UNORM_S8_UINT:
+    case VK_FMT_D32_SFLOAT_S8_UINT:
         has_depth = true;
         break;
     default:
@@ -54,6 +54,6 @@
 }
 
 int intel_format_translate_color(const struct intel_gpu *gpu,
-                                 XGL_FORMAT format);
+                                 VK_FORMAT format);
 
 #endif /* FORMAT_H */
diff --git a/icd/intel/gpu.c b/icd/intel/gpu.c
index 3abd9ed..813c36d 100644
--- a/icd/intel/gpu.c
+++ b/icd/intel/gpu.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -39,7 +39,7 @@
 #include "wsi.h"
 
 static const char * const intel_gpu_exts[INTEL_EXT_COUNT] = {
-    [INTEL_EXT_WSI_X11] = "XGL_WSI_X11",
+    [INTEL_EXT_WSI_X11] = "VK_WSI_X11",
 };
 
 static int gpu_open_primary_node(struct intel_gpu *gpu)
@@ -63,7 +63,7 @@
     if (gpu->render_fd_internal < 0 && gpu->render_node) {
         gpu->render_fd_internal = open(gpu->render_node, O_RDWR);
         if (gpu->render_fd_internal < 0) {
-            intel_log(gpu, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, NULL, 0,
+            intel_log(gpu, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0,
                     0, "failed to open %s", gpu->render_node);
         }
     }
@@ -145,7 +145,7 @@
     return gen;
 }
 
-XGL_RESULT intel_gpu_create(const struct intel_instance *instance, int devid,
+VK_RESULT intel_gpu_create(const struct intel_instance *instance, int devid,
                             const char *primary_node, const char *render_node,
                             struct intel_gpu **gpu_ret)
 {
@@ -154,18 +154,18 @@
     struct intel_gpu *gpu;
 
     if (gen < 0) {
-        intel_log(instance, XGL_DBG_MSG_WARNING, XGL_VALIDATION_LEVEL_0,
-                XGL_NULL_HANDLE, 0, 0, "unsupported device id 0x%04x", devid);
-        return XGL_ERROR_INITIALIZATION_FAILED;
+        intel_log(instance, VK_DBG_MSG_WARNING, VK_VALIDATION_LEVEL_0,
+                VK_NULL_HANDLE, 0, 0, "unsupported device id 0x%04x", devid);
+        return VK_ERROR_INITIALIZATION_FAILED;
     }
 
-    gpu = intel_alloc(instance, sizeof(*gpu), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
+    gpu = intel_alloc(instance, sizeof(*gpu), 0, VK_SYSTEM_ALLOC_API_OBJECT);
     if (!gpu)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     memset(gpu, 0, sizeof(*gpu));
-    /* there is no XGL_DBG_OBJECT_GPU */
-    intel_handle_init(&gpu->handle, XGL_DBG_OBJECT_UNKNOWN, instance->icd);
+    /* there is no VK_DBG_OBJECT_GPU */
+    intel_handle_init(&gpu->handle, VK_DBG_OBJECT_UNKNOWN, instance->icd);
 
     gpu->devid = devid;
 
@@ -173,10 +173,10 @@
     render_len = (render_node) ? strlen(render_node) : 0;
 
     gpu->primary_node = intel_alloc(gpu, primary_len + 1 +
-            ((render_len) ? (render_len + 1) : 0), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+            ((render_len) ? (render_len + 1) : 0), 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!gpu->primary_node) {
         intel_free(instance, gpu);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     memcpy(gpu->primary_node, primary_node, primary_len + 1);
@@ -212,11 +212,11 @@
 
     *gpu_ret = gpu;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_gpu_get_props(const struct intel_gpu *gpu,
-                         XGL_PHYSICAL_GPU_PROPERTIES *props)
+                         VK_PHYSICAL_GPU_PROPERTIES *props)
 {
     const char *name;
     size_t name_len;
@@ -227,7 +227,7 @@
     props->vendorId = 0x8086;
     props->deviceId = gpu->devid;
 
-    props->gpuType = XGL_GPU_TYPE_INTEGRATED;
+    props->gpuType = VK_GPU_TYPE_INTEGRATED;
 
     /* copy GPU name */
     name = gpu_get_name(gpu);
@@ -250,7 +250,7 @@
 }
 
 void intel_gpu_get_perf(const struct intel_gpu *gpu,
-                        XGL_PHYSICAL_GPU_PERFORMANCE *perf)
+                        VK_PHYSICAL_GPU_PERFORMANCE *perf)
 {
     /* TODO */
     perf->maxGpuClock = 1.0f;
@@ -262,11 +262,11 @@
 
 void intel_gpu_get_queue_props(const struct intel_gpu *gpu,
                                enum intel_gpu_engine_type engine,
-                               XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *props)
+                               VK_PHYSICAL_GPU_QUEUE_PROPERTIES *props)
 {
     switch (engine) {
     case INTEL_GPU_ENGINE_3D:
-        props->queueFlags = XGL_QUEUE_GRAPHICS_BIT | XGL_QUEUE_COMPUTE_BIT;
+        props->queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
         props->queueCount = 1;
         props->maxAtomicCounters = INTEL_QUEUE_ATOMIC_COUNTER_COUNT;
         props->supportsTimestamps = true;
@@ -279,21 +279,21 @@
 }
 
 void intel_gpu_get_memory_props(const struct intel_gpu *gpu,
-                                XGL_PHYSICAL_GPU_MEMORY_PROPERTIES *props)
+                                VK_PHYSICAL_GPU_MEMORY_PROPERTIES *props)
 {
     props->supportsMigration = false;
     props->supportsPinning = true;
 }
 
 int intel_gpu_get_max_threads(const struct intel_gpu *gpu,
-                              XGL_PIPELINE_SHADER_STAGE stage)
+                              VK_PIPELINE_SHADER_STAGE stage)
 {
     switch (intel_gpu_gen(gpu)) {
     case INTEL_GEN(7.5):
         switch (stage) {
-        case XGL_SHADER_STAGE_VERTEX:
+        case VK_SHADER_STAGE_VERTEX:
             return (gpu->gt >= 2) ? 280 : 70;
-        case XGL_SHADER_STAGE_FRAGMENT:
+        case VK_SHADER_STAGE_FRAGMENT:
             return (gpu->gt == 3) ? 408 :
                    (gpu->gt == 2) ? 204 : 102;
         default:
@@ -302,9 +302,9 @@
         break;
     case INTEL_GEN(7):
         switch (stage) {
-        case XGL_SHADER_STAGE_VERTEX:
+        case VK_SHADER_STAGE_VERTEX:
             return (gpu->gt == 2) ? 128 : 36;
-        case XGL_SHADER_STAGE_FRAGMENT:
+        case VK_SHADER_STAGE_FRAGMENT:
             return (gpu->gt == 2) ? 172 : 48;
         default:
             break;
@@ -312,9 +312,9 @@
         break;
     case INTEL_GEN(6):
         switch (stage) {
-        case XGL_SHADER_STAGE_VERTEX:
+        case VK_SHADER_STAGE_VERTEX:
             return (gpu->gt == 2) ? 60 : 24;
-        case XGL_SHADER_STAGE_FRAGMENT:
+        case VK_SHADER_STAGE_FRAGMENT:
             return (gpu->gt == 2) ? 80 : 40;
         default:
             break;
@@ -324,13 +324,13 @@
         break;
     }
 
-    intel_log(gpu, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
+    intel_log(gpu, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE,
             0, 0, "unknown Gen or shader stage");
 
     switch (stage) {
-    case XGL_SHADER_STAGE_VERTEX:
+    case VK_SHADER_STAGE_VERTEX:
         return 1;
-    case XGL_SHADER_STAGE_FRAGMENT:
+    case VK_SHADER_STAGE_FRAGMENT:
         return 4;
     default:
         return 1;
@@ -342,7 +342,7 @@
     return gpu_open_primary_node(gpu);
 }
 
-XGL_RESULT intel_gpu_init_winsys(struct intel_gpu *gpu)
+VK_RESULT intel_gpu_init_winsys(struct intel_gpu *gpu)
 {
     int fd;
 
@@ -350,17 +350,17 @@
 
     fd = gpu_open_render_node(gpu);
     if (fd < 0)
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
 
     gpu->winsys = intel_winsys_create_for_fd(gpu->handle.icd, fd);
     if (!gpu->winsys) {
-        intel_log(gpu, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0,
-                XGL_NULL_HANDLE, 0, 0, "failed to create GPU winsys");
+        intel_log(gpu, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0,
+                VK_NULL_HANDLE, 0, 0, "failed to create GPU winsys");
         gpu_close_render_node(gpu);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_gpu_cleanup_winsys(struct intel_gpu *gpu)
@@ -389,8 +389,8 @@
     return type;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglEnumerateLayers(
-    XGL_PHYSICAL_GPU                            gpu,
+ICD_EXPORT VK_RESULT VKAPI vkEnumerateLayers(
+    VK_PHYSICAL_GPU                            gpu,
     size_t                                      maxLayerCount,
     size_t                                      maxStringSize,
     size_t*                                     pOutLayerCount,
@@ -398,51 +398,51 @@
     void*                                       pReserved)
 {
     if (!pOutLayerCount)
-        return XGL_ERROR_INVALID_POINTER;
+        return VK_ERROR_INVALID_POINTER;
 
     *pOutLayerCount = 0;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetGpuInfo(
-    XGL_PHYSICAL_GPU                            gpu_,
-    XGL_PHYSICAL_GPU_INFO_TYPE                  infoType,
+ICD_EXPORT VK_RESULT VKAPI vkGetGpuInfo(
+    VK_PHYSICAL_GPU                            gpu_,
+    VK_PHYSICAL_GPU_INFO_TYPE                  infoType,
     size_t*                                     pDataSize,
     void*                                       pData)
 {
     struct intel_gpu *gpu = intel_gpu(gpu_);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (infoType) {
-    case XGL_INFO_TYPE_PHYSICAL_GPU_PROPERTIES:
-        *pDataSize = sizeof(XGL_PHYSICAL_GPU_PROPERTIES);
+    case VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES:
+        *pDataSize = sizeof(VK_PHYSICAL_GPU_PROPERTIES);
         if (pData == NULL) {
             return ret;
         }
         intel_gpu_get_props(gpu, pData);
         break;
 
-    case XGL_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE:
-        *pDataSize = sizeof(XGL_PHYSICAL_GPU_PERFORMANCE);
+    case VK_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE:
+        *pDataSize = sizeof(VK_PHYSICAL_GPU_PERFORMANCE);
         if (pData == NULL) {
             return ret;
         }
         intel_gpu_get_perf(gpu, pData);
         break;
 
-    case XGL_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES:
+    case VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES:
         /*
-         * XGL Programmers guide, page 33:
+         * Vulkan Programmer's Guide, page 33:
          * to determine the data size an application calls
-         * xglGetGpuInfo() with a NULL data pointer. The
+         * vkGetGpuInfo() with a NULL data pointer. The
          * expected data size for all queue property structures
          * is returned in pDataSize
          */
-        *pDataSize = sizeof(XGL_PHYSICAL_GPU_QUEUE_PROPERTIES) *
+        *pDataSize = sizeof(VK_PHYSICAL_GPU_QUEUE_PROPERTIES) *
             INTEL_GPU_ENGINE_COUNT;
         if (pData != NULL) {
-            XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *dst = pData;
+            VK_PHYSICAL_GPU_QUEUE_PROPERTIES *dst = pData;
             int engine;
 
             for (engine = 0; engine < INTEL_GPU_ENGINE_COUNT; engine++) {
@@ -452,8 +452,8 @@
         }
         break;
 
-    case XGL_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES:
-        *pDataSize = sizeof(XGL_PHYSICAL_GPU_MEMORY_PROPERTIES);
+    case VK_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES:
+        *pDataSize = sizeof(VK_PHYSICAL_GPU_MEMORY_PROPERTIES);
         if (pData == NULL) {
             return ret;
         }
@@ -468,34 +468,34 @@
     return ret;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetExtensionSupport(
-    XGL_PHYSICAL_GPU                            gpu_,
+ICD_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(
+    VK_PHYSICAL_GPU                            gpu_,
     const char*                                 pExtName)
 {
     struct intel_gpu *gpu = intel_gpu(gpu_);
     const enum intel_ext_type ext = intel_gpu_lookup_extension(gpu, pExtName);
 
     return (ext != INTEL_EXT_INVALID) ?
-        XGL_SUCCESS : XGL_ERROR_INVALID_EXTENSION;
+        VK_SUCCESS : VK_ERROR_INVALID_EXTENSION;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetMultiGpuCompatibility(
-    XGL_PHYSICAL_GPU                            gpu0_,
-    XGL_PHYSICAL_GPU                            gpu1_,
-    XGL_GPU_COMPATIBILITY_INFO*                 pInfo)
+ICD_EXPORT VK_RESULT VKAPI vkGetMultiGpuCompatibility(
+    VK_PHYSICAL_GPU                            gpu0_,
+    VK_PHYSICAL_GPU                            gpu1_,
+    VK_GPU_COMPATIBILITY_INFO*                 pInfo)
 {
     const struct intel_gpu *gpu0 = intel_gpu(gpu0_);
     const struct intel_gpu *gpu1 = intel_gpu(gpu1_);
-    XGL_FLAGS compat = XGL_GPU_COMPAT_IQ_MATCH_BIT |
-                       XGL_GPU_COMPAT_PEER_TRANSFER_BIT |
-                       XGL_GPU_COMPAT_SHARED_MEMORY_BIT |
-                       XGL_GPU_COMPAT_SHARED_GPU0_DISPLAY_BIT |
-                       XGL_GPU_COMPAT_SHARED_GPU1_DISPLAY_BIT;
+    VK_FLAGS compat = VK_GPU_COMPAT_IQ_MATCH_BIT |
+                       VK_GPU_COMPAT_PEER_TRANSFER_BIT |
+                       VK_GPU_COMPAT_SHARED_MEMORY_BIT |
+                       VK_GPU_COMPAT_SHARED_GPU0_DISPLAY_BIT |
+                       VK_GPU_COMPAT_SHARED_GPU1_DISPLAY_BIT;
 
     if (intel_gpu_gen(gpu0) == intel_gpu_gen(gpu1))
-        compat |= XGL_GPU_COMPAT_ASIC_FEATURES_BIT;
+        compat |= VK_GPU_COMPAT_ASIC_FEATURES_BIT;
 
     pInfo->compatibilityFlags = compat;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
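
The hunks above keep the two-call size-query convention that the comment
cites from the programmer's guide. A minimal sketch of the caller side,
assuming a valid gpu handle from vkEnumerateGpus(); error checking and
includes are elided:

    size_t size;
    VK_PHYSICAL_GPU_QUEUE_PROPERTIES *props;

    /* first call: pData == NULL, so only the total size is written */
    vkGetGpuInfo(gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
                 &size, NULL);

    /* second call: one struct per engine lands in the buffer */
    props = malloc(size);
    vkGetGpuInfo(gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
                 &size, props);
    free(props);
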
diff --git a/icd/intel/gpu.h b/icd/intel/gpu.h
index 1afd347..70237e2 100644
--- a/icd/intel/gpu.h
+++ b/icd/intel/gpu.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -66,7 +66,7 @@
     int gen_opaque;     /* always read this with intel_gpu_gen() */
     int gt;
 
-    XGL_GPU_SIZE max_batch_buffer_size;
+    VK_GPU_SIZE max_batch_buffer_size;
     uint32_t batch_buffer_reloc_count;
 
     /*
@@ -85,7 +85,7 @@
     uint32_t display_count;
 };
 
-static inline struct intel_gpu *intel_gpu(XGL_PHYSICAL_GPU gpu)
+static inline struct intel_gpu *intel_gpu(VK_PHYSICAL_GPU gpu)
 {
     return (struct intel_gpu *) gpu;
 }
@@ -99,27 +99,27 @@
 #endif
 }
 
-XGL_RESULT intel_gpu_create(const struct intel_instance *instance, int devid,
+VK_RESULT intel_gpu_create(const struct intel_instance *instance, int devid,
                             const char *primary_node, const char *render_node,
                             struct intel_gpu **gpu_ret);
 void intel_gpu_destroy(struct intel_gpu *gpu);
 
 void intel_gpu_get_props(const struct intel_gpu *gpu,
-                         XGL_PHYSICAL_GPU_PROPERTIES *props);
+                         VK_PHYSICAL_GPU_PROPERTIES *props);
 void intel_gpu_get_perf(const struct intel_gpu *gpu,
-                        XGL_PHYSICAL_GPU_PERFORMANCE *perf);
+                        VK_PHYSICAL_GPU_PERFORMANCE *perf);
 void intel_gpu_get_queue_props(const struct intel_gpu *gpu,
                                enum intel_gpu_engine_type engine,
-                               XGL_PHYSICAL_GPU_QUEUE_PROPERTIES *props);
+                               VK_PHYSICAL_GPU_QUEUE_PROPERTIES *props);
 void intel_gpu_get_memory_props(const struct intel_gpu *gpu,
-                                XGL_PHYSICAL_GPU_MEMORY_PROPERTIES *props);
+                                VK_PHYSICAL_GPU_MEMORY_PROPERTIES *props);
 
 int intel_gpu_get_max_threads(const struct intel_gpu *gpu,
-                              XGL_PIPELINE_SHADER_STAGE stage);
+                              VK_PIPELINE_SHADER_STAGE stage);
 
 int intel_gpu_get_primary_fd(struct intel_gpu *gpu);
 
-XGL_RESULT intel_gpu_init_winsys(struct intel_gpu *gpu);
+VK_RESULT intel_gpu_init_winsys(struct intel_gpu *gpu);
 void intel_gpu_cleanup_winsys(struct intel_gpu *gpu);
 
 enum intel_ext_type intel_gpu_lookup_extension(const struct intel_gpu *gpu,
diff --git a/icd/intel/img.c b/icd/intel/img.c
index cb2faf3..19bd3cf 100644
--- a/icd/intel/img.c
+++ b/icd/intel/img.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -47,34 +47,34 @@
     intel_img_destroy(img);
 }
 
-static XGL_RESULT img_get_info(struct intel_base *base, int type,
+static VK_RESULT img_get_info(struct intel_base *base, int type,
                                size_t *size, void *data)
 {
     struct intel_img *img = intel_img_from_base(base);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (type) {
-    case XGL_INFO_TYPE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
         {
-            XGL_MEMORY_REQUIREMENTS *mem_req = data;
+            VK_MEMORY_REQUIREMENTS *mem_req = data;
 
-            *size = sizeof(XGL_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
             mem_req->size = img->total_size;
             mem_req->alignment = 4096;
-            if (img->format_class == XGL_IMAGE_FORMAT_CLASS_LINEAR) {
-                mem_req->memType = XGL_MEMORY_TYPE_BUFFER;
+            if (img->format_class == VK_IMAGE_FORMAT_CLASS_LINEAR) {
+                mem_req->memType = VK_MEMORY_TYPE_BUFFER;
             } else {
-                mem_req->memType = XGL_MEMORY_TYPE_IMAGE;
+                mem_req->memType = VK_MEMORY_TYPE_IMAGE;
             }
         }
         break;
-    case XGL_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS:
         {
-            XGL_IMAGE_MEMORY_REQUIREMENTS *img_req = data;
+            VK_IMAGE_MEMORY_REQUIREMENTS *img_req = data;
 
-            *size = sizeof(XGL_IMAGE_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
             img_req->usage = img->usage;
@@ -82,11 +82,11 @@
             img_req->samples = img->samples;
         }
         break;
-    case XGL_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
         {
-            XGL_BUFFER_MEMORY_REQUIREMENTS *buf_req = data;
+            VK_BUFFER_MEMORY_REQUIREMENTS *buf_req = data;
 
-            *size = sizeof(XGL_BUFFER_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
             buf_req->usage = img->usage;
@@ -100,8 +100,8 @@
     return ret;
 }
 
-XGL_RESULT intel_img_create(struct intel_dev *dev,
-                            const XGL_IMAGE_CREATE_INFO *info,
+VK_RESULT intel_img_create(struct intel_dev *dev,
+                            const VK_IMAGE_CREATE_INFO *info,
                             bool scanout,
                             struct intel_img **img_ret)
 {
@@ -109,9 +109,9 @@
     struct intel_layout *layout;
 
     img = (struct intel_img *) intel_base_create(&dev->base.handle,
-            sizeof(*img), dev->base.dbg, XGL_DBG_OBJECT_IMAGE, info, 0);
+            sizeof(*img), dev->base.dbg, VK_DBG_OBJECT_IMAGE, info, 0);
     if (!img)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     layout = &img->layout;
 
@@ -120,18 +120,18 @@
     img->mip_levels = info->mipLevels;
     img->array_size = info->arraySize;
     img->usage = info->usage;
-    if (info->tiling == XGL_LINEAR_TILING)
-        img->format_class = XGL_IMAGE_FORMAT_CLASS_LINEAR;
+    if (info->tiling == VK_LINEAR_TILING)
+        img->format_class = VK_IMAGE_FORMAT_CLASS_LINEAR;
     else
         img->format_class = icd_format_get_class(info->format);
     img->samples = info->samples;
     intel_layout_init(layout, dev, info, scanout);
 
     if (layout->bo_stride > intel_max_resource_size / layout->bo_height) {
-        intel_dev_log(dev, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0,
-                XGL_NULL_HANDLE, 0, 0, "image too big");
+        intel_dev_log(dev, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0,
+                VK_NULL_HANDLE, 0, 0, "image too big");
         intel_img_destroy(img);
-        return XGL_ERROR_INVALID_MEMORY_SIZE;
+        return VK_ERROR_INVALID_MEMORY_SIZE;
     }
 
     img->total_size = img->layout.bo_stride * img->layout.bo_height;
@@ -143,19 +143,19 @@
     }
 
     if (layout->separate_stencil) {
-        XGL_IMAGE_CREATE_INFO s8_info;
+        VK_IMAGE_CREATE_INFO s8_info;
 
         img->s8_layout = intel_alloc(img, sizeof(*img->s8_layout), 0,
-                XGL_SYSTEM_ALLOC_INTERNAL);
+                VK_SYSTEM_ALLOC_INTERNAL);
         if (!img->s8_layout) {
             intel_img_destroy(img);
-            return XGL_ERROR_OUT_OF_MEMORY;
+            return VK_ERROR_OUT_OF_MEMORY;
         }
 
         s8_info = *info;
-        s8_info.format = XGL_FMT_S8_UINT;
+        s8_info.format = VK_FMT_S8_UINT;
         /* no stencil texturing */
-        s8_info.usage &= ~XGL_IMAGE_USAGE_SHADER_ACCESS_READ_BIT;
+        s8_info.usage &= ~VK_IMAGE_USAGE_SHADER_ACCESS_READ_BIT;
         assert(icd_format_is_ds(info->format));
 
         intel_layout_init(img->s8_layout, dev, &s8_info, scanout);
@@ -166,8 +166,8 @@
     }
 
     if (scanout) {
-        XGL_RESULT ret = intel_wsi_img_init(img);
-        if (ret != XGL_SUCCESS) {
+        VK_RESULT ret = intel_wsi_img_init(img);
+        if (ret != VK_SUCCESS) {
             intel_img_destroy(img);
             return ret;
         }
@@ -178,7 +178,7 @@
 
     *img_ret = img;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_img_destroy(struct intel_img *img)
@@ -192,19 +192,19 @@
     intel_base_destroy(&img->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglOpenPeerImage(
-    XGL_DEVICE                                  device,
-    const XGL_PEER_IMAGE_OPEN_INFO*             pOpenInfo,
-    XGL_IMAGE*                                  pImage,
-    XGL_GPU_MEMORY*                             pMem)
+ICD_EXPORT VK_RESULT VKAPI vkOpenPeerImage(
+    VK_DEVICE                                  device,
+    const VK_PEER_IMAGE_OPEN_INFO*             pOpenInfo,
+    VK_IMAGE*                                  pImage,
+    VK_GPU_MEMORY*                             pMem)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateImage(
-    XGL_DEVICE                                  device,
-    const XGL_IMAGE_CREATE_INFO*                pCreateInfo,
-    XGL_IMAGE*                                  pImage)
+ICD_EXPORT VK_RESULT VKAPI vkCreateImage(
+    VK_DEVICE                                  device,
+    const VK_IMAGE_CREATE_INFO*                pCreateInfo,
+    VK_IMAGE*                                  pImage)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -212,27 +212,27 @@
             (struct intel_img **) pImage);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetImageSubresourceInfo(
-    XGL_IMAGE                                   image,
-    const XGL_IMAGE_SUBRESOURCE*                pSubresource,
-    XGL_SUBRESOURCE_INFO_TYPE                   infoType,
+ICD_EXPORT VK_RESULT VKAPI vkGetImageSubresourceInfo(
+    VK_IMAGE                                   image,
+    const VK_IMAGE_SUBRESOURCE*                pSubresource,
+    VK_SUBRESOURCE_INFO_TYPE                   infoType,
     size_t*                                     pDataSize,
     void*                                       pData)
 {
     const struct intel_img *img = intel_img(image);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (infoType) {
-    case XGL_INFO_TYPE_SUBRESOURCE_LAYOUT:
+    case VK_INFO_TYPE_SUBRESOURCE_LAYOUT:
         {
-            XGL_SUBRESOURCE_LAYOUT *layout = (XGL_SUBRESOURCE_LAYOUT *) pData;
+            VK_SUBRESOURCE_LAYOUT *layout = (VK_SUBRESOURCE_LAYOUT *) pData;
             unsigned x, y;
 
             intel_layout_get_slice_pos(&img->layout, pSubresource->mipLevel,
                     pSubresource->arraySlice, &x, &y);
             intel_layout_pos_to_mem(&img->layout, x, y, &x, &y);
 
-            *pDataSize = sizeof(XGL_SUBRESOURCE_LAYOUT);
+            *pDataSize = sizeof(VK_SUBRESOURCE_LAYOUT);
 
             if (pData == NULL)
                 return ret;
@@ -245,7 +245,7 @@
         }
         break;
     default:
-        ret = XGL_ERROR_INVALID_VALUE;
+        ret = VK_ERROR_INVALID_VALUE;
         break;
     }
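
vkGetImageSubresourceInfo() above follows the same size-query
convention. A hedged sketch of a single query against an image from
vkCreateImage(); the aspect member and VK_IMAGE_ASPECT_COLOR value are
assumptions, since this patch does not show the VK_IMAGE_SUBRESOURCE
definition:

    VK_IMAGE_SUBRESOURCE subres = {
        .aspect     = VK_IMAGE_ASPECT_COLOR, /* assumed name/value */
        .mipLevel   = 0,
        .arraySlice = 0,
    };
    VK_SUBRESOURCE_LAYOUT layout;
    size_t size;

    vkGetImageSubresourceInfo(image, &subres,
            VK_INFO_TYPE_SUBRESOURCE_LAYOUT, &size, &layout);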
 
diff --git a/icd/intel/img.h b/icd/intel/img.h
index 30289f9..7488696 100644
--- a/icd/intel/img.h
+++ b/icd/intel/img.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -36,12 +36,12 @@
 struct intel_img {
     struct intel_obj obj;
 
-    XGL_IMAGE_TYPE type;
+    VK_IMAGE_TYPE type;
     int32_t depth;
     uint32_t mip_levels;
     uint32_t array_size;
-    XGL_FLAGS usage;
-    XGL_IMAGE_FORMAT_CLASS format_class;  // should this be integrated into intel_layout?
+    VK_FLAGS usage;
+    VK_IMAGE_FORMAT_CLASS format_class;  // should this be integrated into intel_layout?
     uint32_t samples;
     struct intel_layout layout;
 
@@ -55,7 +55,7 @@
     void *wsi_data;
 };
 
-static inline struct intel_img *intel_img(XGL_IMAGE image)
+static inline struct intel_img *intel_img(VK_IMAGE image)
 {
     return (struct intel_img *) image;
 }
@@ -70,8 +70,8 @@
     return intel_img_from_base(&obj->base);
 }
 
-XGL_RESULT intel_img_create(struct intel_dev *dev,
-                            const XGL_IMAGE_CREATE_INFO *info,
+VK_RESULT intel_img_create(struct intel_dev *dev,
+                            const VK_IMAGE_CREATE_INFO *info,
                             bool scanout,
                             struct intel_img **img_ret);
 
diff --git a/icd/intel/instance.c b/icd/intel/instance.c
index d2112b1..b39774c 100644
--- a/icd/intel/instance.c
+++ b/icd/intel/instance.c
@@ -1,5 +1,5 @@
 /*
- * XGL 3-D graphics library
+ * Vulkan 3-D graphics library
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -108,7 +108,7 @@
     icd_instance_destroy(icd);
 }
 
-static struct intel_instance *intel_instance_create(const XGL_INSTANCE_CREATE_INFO* info)
+static struct intel_instance *intel_instance_create(const VK_INSTANCE_CREATE_INFO* info)
 {
     struct intel_instance *instance;
     struct icd_instance *icd;
@@ -121,14 +121,14 @@
         return NULL;
 
     instance = icd_instance_alloc(icd, sizeof(*instance), 0,
-            XGL_SYSTEM_ALLOC_API_OBJECT);
+            VK_SYSTEM_ALLOC_API_OBJECT);
     if (!instance) {
         icd_instance_destroy(icd);
         return NULL;
     }
 
     memset(instance, 0, sizeof(*instance));
-    intel_handle_init(&instance->handle, XGL_DBG_OBJECT_INSTANCE, icd);
+    intel_handle_init(&instance->handle, VK_DBG_OBJECT_INSTANCE, icd);
 
     instance->icd = icd;
 
@@ -142,47 +142,47 @@
     return instance;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateInstance(
-    const XGL_INSTANCE_CREATE_INFO*             pCreateInfo,
-    XGL_INSTANCE*                               pInstance)
+ICD_EXPORT VK_RESULT VKAPI vkCreateInstance(
+    const VK_INSTANCE_CREATE_INFO*             pCreateInfo,
+    VK_INSTANCE*                               pInstance)
 {
     struct intel_instance *instance;
 
     instance = intel_instance_create(pCreateInfo);
     if (!instance)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
-    *pInstance = (XGL_INSTANCE) instance;
+    *pInstance = (VK_INSTANCE) instance;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDestroyInstance(
-    XGL_INSTANCE                                pInstance)
+ICD_EXPORT VK_RESULT VKAPI vkDestroyInstance(
+    VK_INSTANCE                                pInstance)
 {
     struct intel_instance *instance = intel_instance(pInstance);
 
     intel_instance_destroy(instance);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglEnumerateGpus(
-    XGL_INSTANCE                                instance_,
+ICD_EXPORT VK_RESULT VKAPI vkEnumerateGpus(
+    VK_INSTANCE                                instance_,
     uint32_t                                    maxGpus,
     uint32_t*                                   pGpuCount,
-    XGL_PHYSICAL_GPU*                           pGpus)
+    VK_PHYSICAL_GPU*                           pGpus)
 {
     struct intel_instance *instance = intel_instance(instance_);
     struct icd_drm_device *devices, *dev;
-    XGL_RESULT ret;
+    VK_RESULT ret;
     uint32_t count;
 
     intel_instance_remove_gpus(instance);
 
     if (!maxGpus) {
         *pGpuCount = 0;
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
     }
 
     devices = icd_drm_enumerate(instance->icd, 0x8086);
@@ -203,10 +203,10 @@
         devid = (intel_devid_override) ? intel_devid_override : dev->devid;
         ret = intel_gpu_create(instance, devid,
                 primary_node, render_node, &gpu);
-        if (ret == XGL_SUCCESS) {
+        if (ret == VK_SUCCESS) {
             intel_instance_add_gpu(instance, gpu);
 
-            pGpus[count++] = (XGL_PHYSICAL_GPU) gpu;
+            pGpus[count++] = (VK_PHYSICAL_GPU) gpu;
             if (count >= maxGpus)
                 break;
         }
@@ -218,12 +218,12 @@
 
     *pGpuCount = count;
 
-    return (count > 0) ? XGL_SUCCESS : XGL_ERROR_UNAVAILABLE;
+    return (count > 0) ? VK_SUCCESS : VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgRegisterMsgCallback(
-    XGL_INSTANCE                                instance_,
-    XGL_DBG_MSG_CALLBACK_FUNCTION               pfnMsgCallback,
+ICD_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(
+    VK_INSTANCE                                instance_,
+    VK_DBG_MSG_CALLBACK_FUNCTION               pfnMsgCallback,
     void*                                       pUserData)
 {
     struct intel_instance *instance = intel_instance(instance_);
@@ -231,36 +231,36 @@
     return icd_instance_add_logger(instance->icd, pfnMsgCallback, pUserData);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgUnregisterMsgCallback(
-    XGL_INSTANCE                                instance_,
-    XGL_DBG_MSG_CALLBACK_FUNCTION               pfnMsgCallback)
+ICD_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(
+    VK_INSTANCE                                instance_,
+    VK_DBG_MSG_CALLBACK_FUNCTION               pfnMsgCallback)
 {
     struct intel_instance *instance = intel_instance(instance_);
 
     return icd_instance_remove_logger(instance->icd, pfnMsgCallback);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetGlobalOption(
-    XGL_INSTANCE                                instance_,
-    XGL_DBG_GLOBAL_OPTION                       dbgOption,
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(
+    VK_INSTANCE                                instance_,
+    VK_DBG_GLOBAL_OPTION                       dbgOption,
     size_t                                      dataSize,
     const void*                                 pData)
 {
     struct intel_instance *instance = intel_instance(instance_);
-    XGL_RESULT res = XGL_SUCCESS;
+    VK_RESULT res = VK_SUCCESS;
 
     if (dataSize == 0)
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
 
     switch (dbgOption) {
-    case XGL_DBG_OPTION_DEBUG_ECHO_ENABLE:
-    case XGL_DBG_OPTION_BREAK_ON_ERROR:
-    case XGL_DBG_OPTION_BREAK_ON_WARNING:
+    case VK_DBG_OPTION_DEBUG_ECHO_ENABLE:
+    case VK_DBG_OPTION_BREAK_ON_ERROR:
+    case VK_DBG_OPTION_BREAK_ON_WARNING:
         res = icd_instance_set_bool(instance->icd, dbgOption,
                 *((const bool *) pData));
         break;
     default:
-        res = XGL_ERROR_INVALID_VALUE;
+        res = VK_ERROR_INVALID_VALUE;
         break;
     }
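
Taken together, the renamed entry points in this file form the usual
bring-up sequence. A sketch, where pCreateInfo is a filled-in
VK_INSTANCE_CREATE_INFO and use_gpu() is a hypothetical consumer:

    VK_INSTANCE instance;
    VK_PHYSICAL_GPU gpus[4];
    uint32_t count;

    if (vkCreateInstance(pCreateInfo, &instance) != VK_SUCCESS)
        return;

    /* the ICD reports VK_ERROR_UNAVAILABLE when no Intel GPU is found */
    if (vkEnumerateGpus(instance, 4, &count, gpus) == VK_SUCCESS)
        use_gpu(gpus[0]);

    vkDestroyInstance(instance);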
 
diff --git a/icd/intel/instance.h b/icd/intel/instance.h
index 474e0b5..b61c2ef 100644
--- a/icd/intel/instance.h
+++ b/icd/intel/instance.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2015 LunarG, Inc.
  *
@@ -41,7 +41,7 @@
     bool exts[INTEL_EXT_COUNT];
 };
 
-static inline struct intel_instance *intel_instance(XGL_INSTANCE instance)
+static inline struct intel_instance *intel_instance(VK_INSTANCE instance)
 {
     return (struct intel_instance *) instance;
 }
diff --git a/icd/intel/intel.h b/icd/intel/intel.h
index 5d0bb72..89f9476 100644
--- a/icd/intel/intel.h
+++ b/icd/intel/intel.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -36,10 +36,10 @@
 #include <string.h>
 #include <assert.h>
 
-#include <xgl.h>
-#include <xglDbg.h>
-#include <xglWsiX11Ext.h>
-#include <xglIcd.h>
+#include <vulkan.h>
+#include <vkDbg.h>
+#include <vkWsiX11Ext.h>
+#include <vkIcd.h>
 
 #include "icd.h"
 #include "icd-spv.h"
@@ -47,7 +47,7 @@
 #include "icd-instance.h"
 #include "icd-utils.h"
 
-#define INTEL_API_VERSION XGL_API_VERSION
+#define INTEL_API_VERSION VK_API_VERSION
 #define INTEL_DRIVER_VERSION 0
 
 #define INTEL_GEN(gen) ((int) ((gen) * 100))
@@ -78,7 +78,7 @@
 static const uint32_t intel_handle_magic = 0x494e544c;
 
 static inline void intel_handle_init(struct intel_handle *handle,
-                                     XGL_DBG_OBJECT_TYPE type,
+                                     VK_DBG_OBJECT_TYPE type,
                                      const struct icd_instance *icd)
 {
     set_loader_magic_value(handle);
@@ -97,7 +97,7 @@
     const uint32_t handle_type =
         ((const struct intel_handle *) handle)->magic - intel_handle_magic;
 
-    return (handle_type <= XGL_DBG_OBJECT_TYPE_END_RANGE);
+    return (handle_type <= VK_DBG_OBJECT_TYPE_END_RANGE);
 }
 
 /**
@@ -106,7 +106,7 @@
  * \see intel_handle_validate().
  */
 static inline bool intel_handle_validate_type(const void *handle,
-                                              XGL_DBG_OBJECT_TYPE type)
+                                              VK_DBG_OBJECT_TYPE type)
 {
     const uint32_t handle_type =
         ((const struct intel_handle *) handle)->magic - intel_handle_magic;
@@ -116,7 +116,7 @@
 
 static inline void *intel_alloc(const void *handle,
                                 size_t size, size_t alignment,
-                                XGL_SYSTEM_ALLOC_TYPE type)
+                                VK_SYSTEM_ALLOC_TYPE type)
 {
     assert(intel_handle_validate(handle));
     return icd_instance_alloc(((const struct intel_handle *) handle)->icd,
@@ -130,9 +130,9 @@
 }
 
 static inline void intel_logv(const void *handle,
-                              XGL_DBG_MSG_TYPE msg_type,
-                              XGL_VALIDATION_LEVEL validation_level,
-                              XGL_BASE_OBJECT src_object,
+                              VK_DBG_MSG_TYPE msg_type,
+                              VK_VALIDATION_LEVEL validation_level,
+                              VK_BASE_OBJECT src_object,
                               size_t location, int32_t msg_code,
                               const char *format, va_list ap)
 {
@@ -149,9 +149,9 @@
 }
 
 static inline void intel_log(const void *handle,
-                             XGL_DBG_MSG_TYPE msg_type,
-                             XGL_VALIDATION_LEVEL validation_level,
-                             XGL_BASE_OBJECT src_object,
+                             VK_DBG_MSG_TYPE msg_type,
+                             VK_VALIDATION_LEVEL validation_level,
+                             VK_BASE_OBJECT src_object,
                              size_t location, int32_t msg_code,
                              const char *format, ...)
 {
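
The handle check above is plain arithmetic on the magic value: the
decode side computes handle->magic - intel_handle_magic and range-checks
the result, which implies the init side stores intel_handle_magic + type.
The body of intel_handle_init() is cut off by the hunk, so the first
line below is inferred rather than shown:

    handle->magic = intel_handle_magic + (uint32_t) type;  /* inferred */
    type_bits = handle->magic - intel_handle_magic;        /* decode   */
    valid = (type_bits <= VK_DBG_OBJECT_TYPE_END_RANGE);   /* validate */
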
diff --git a/icd/intel/kmd/winsys_drm.c b/icd/intel/kmd/winsys_drm.c
index 92d6a9e..349cc88 100644
--- a/icd/intel/kmd/winsys_drm.c
+++ b/icd/intel/kmd/winsys_drm.c
@@ -168,7 +168,7 @@
    struct intel_winsys *winsys;
 
    winsys = icd_instance_alloc(instance, sizeof(*winsys), 0,
-           XGL_SYSTEM_ALLOC_INTERNAL);
+           VK_SYSTEM_ALLOC_INTERNAL);
    if (!winsys)
       return NULL;
 
diff --git a/icd/intel/layout.c b/icd/intel/layout.c
index 44114f1..78f2868 100644
--- a/icd/intel/layout.c
+++ b/icd/intel/layout.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -46,7 +46,7 @@
    struct intel_dev *dev;
 
    const struct intel_gpu *gpu;
-   const XGL_IMAGE_CREATE_INFO *info;
+   const VK_IMAGE_CREATE_INFO *info;
    bool scanout;
 
    bool compressed;
@@ -60,7 +60,7 @@
                       const struct intel_layout_params *params,
                       unsigned level, unsigned *width, unsigned *height)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    unsigned w, h;
 
    w = u_minify(layout->width0, level);
@@ -161,7 +161,7 @@
 layout_get_num_layers(const struct intel_layout *layout,
                       const struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    unsigned num_layers = info->arraySize;
 
    /* samples of the same index are stored in a layer */
@@ -175,7 +175,7 @@
 layout_init_layer_height(struct intel_layout *layout,
                          struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    unsigned num_layers;
 
    if (layout->walk != INTEL_LAYOUT_WALK_LAYER)
@@ -233,7 +233,7 @@
 layout_init_lods(struct intel_layout *layout,
                  struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    unsigned cur_x, cur_y;
    unsigned lv;
 
@@ -259,7 +259,7 @@
 
          /* every LOD begins at tile boundaries */
          if (info->mipLevels > 1) {
-            assert(layout->format == XGL_FMT_S8_UINT);
+            assert(layout->format == VK_FMT_S8_UINT);
             cur_x = u_align(cur_x, 64);
             cur_y = u_align(cur_y, 64);
          }
@@ -306,7 +306,7 @@
 layout_init_alignments(struct intel_layout *layout,
                        struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
 
    /*
     * From the Sandy Bridge PRM, volume 1 part 1, page 113:
@@ -400,14 +400,14 @@
       /* this happens to be the case */
       layout->align_i = layout->block_width;
       layout->align_j = layout->block_height;
-   } else if (info->usage & XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
+   } else if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
       if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7)) {
          switch (layout->format) {
-         case XGL_FMT_D16_UNORM:
+         case VK_FMT_D16_UNORM:
             layout->align_i = 8;
             layout->align_j = 4;
             break;
-         case XGL_FMT_S8_UINT:
+         case VK_FMT_S8_UINT:
             layout->align_i = 8;
             layout->align_j = 8;
             break;
@@ -418,7 +418,7 @@
          }
       } else {
          switch (layout->format) {
-         case XGL_FMT_S8_UINT:
+         case VK_FMT_S8_UINT:
             layout->align_i = 4;
             layout->align_j = 2;
             break;
@@ -434,11 +434,11 @@
          (intel_gpu_gen(params->gpu) >= INTEL_GEN(8)) ||
          (intel_gpu_gen(params->gpu) >= INTEL_GEN(7) &&
           layout->tiling == GEN6_TILING_Y &&
-          (info->usage & XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT));
+          (info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT));
 
       if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7) &&
           intel_gpu_gen(params->gpu) <= INTEL_GEN(7.5) && valign_4)
-         assert(layout->format != XGL_FMT_R32G32B32_SFLOAT);
+         assert(layout->format != VK_FMT_R32G32B32_SFLOAT);
 
       layout->align_i = 4;
       layout->align_j = (valign_4) ? 4 : 2;
@@ -464,8 +464,8 @@
 layout_get_valid_tilings(const struct intel_layout *layout,
                          const struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
-   const XGL_FORMAT format = layout->format;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
+   const VK_FORMAT format = layout->format;
    unsigned valid_tilings = LAYOUT_TILING_ALL;
 
    /*
@@ -477,7 +477,7 @@
    if (params->scanout)
        valid_tilings &= LAYOUT_TILING_X;
 
-   if (info->tiling == XGL_LINEAR_TILING)
+   if (info->tiling == VK_LINEAR_TILING)
        valid_tilings &= LAYOUT_TILING_NONE;
 
    /*
@@ -492,9 +492,9 @@
     *
     *     "W-Major Tile Format is used for separate stencil."
     */
-   if (info->usage & XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
+   if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
       switch (format) {
-      case XGL_FMT_S8_UINT:
+      case VK_FMT_S8_UINT:
          valid_tilings &= LAYOUT_TILING_W;
          break;
       default:
@@ -503,7 +503,7 @@
       }
    }
 
-   if (info->usage & XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
+   if (info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
       /*
        * From the Sandy Bridge PRM, volume 1 part 2, page 32:
        *
@@ -528,13 +528,13 @@
        */
       if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7) &&
           intel_gpu_gen(params->gpu) <= INTEL_GEN(7.5) &&
-          layout->format == XGL_FMT_R32G32B32_SFLOAT)
+          layout->format == VK_FMT_R32G32B32_SFLOAT)
          valid_tilings &= ~LAYOUT_TILING_Y;
 
       valid_tilings &= ~LAYOUT_TILING_W;
    }
 
-   if (info->usage & XGL_IMAGE_USAGE_SHADER_ACCESS_READ_BIT) {
+   if (info->usage & VK_IMAGE_USAGE_SHADER_ACCESS_READ_BIT) {
       if (intel_gpu_gen(params->gpu) < INTEL_GEN(8))
          valid_tilings &= ~LAYOUT_TILING_W;
    }
@@ -549,7 +549,7 @@
 layout_init_tiling(struct intel_layout *layout,
                    struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    unsigned preferred_tilings;
 
    layout->valid_tilings = layout_get_valid_tilings(layout, params);
@@ -560,8 +560,8 @@
    if (preferred_tilings & ~LAYOUT_TILING_W)
       preferred_tilings &= ~LAYOUT_TILING_W;
 
-   if (info->usage & (XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
-                      XGL_IMAGE_USAGE_SHADER_ACCESS_READ_BIT)) {
+   if (info->usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+                      VK_IMAGE_USAGE_SHADER_ACCESS_READ_BIT)) {
       /*
        * heuristically set a minimum width/height for enabling tiling
        */
@@ -593,7 +593,7 @@
 layout_init_walk_gen7(struct intel_layout *layout,
                               struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
 
    /*
     * It is not explicitly stated, but render targets are expected to be
@@ -602,14 +602,14 @@
     *
     * See "Multisampled Surface Storage Format" field of SURFACE_STATE.
     */
-   if (info->usage & XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
+   if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
       /*
        * From the Ivy Bridge PRM, volume 1 part 1, page 111:
        *
        *     "note that the depth buffer and stencil buffer have an implied
        *      value of ARYSPC_FULL"
        */
-      layout->walk = (info->imageType == XGL_IMAGE_3D) ?
+      layout->walk = (info->imageType == VK_IMAGE_3D) ?
          INTEL_LAYOUT_WALK_3D : INTEL_LAYOUT_WALK_LAYER;
 
       layout->interleaved_samples = true;
@@ -628,7 +628,7 @@
          assert(info->mipLevels == 1);
 
       layout->walk =
-         (info->imageType == XGL_IMAGE_3D) ? INTEL_LAYOUT_WALK_3D :
+         (info->imageType == VK_IMAGE_3D) ? INTEL_LAYOUT_WALK_3D :
          (info->mipLevels > 1) ? INTEL_LAYOUT_WALK_LAYER :
          INTEL_LAYOUT_WALK_LOD;
 
@@ -652,8 +652,8 @@
     * GEN6 does not support compact spacing otherwise.
     */
    layout->walk =
-      (params->info->imageType == XGL_IMAGE_3D) ? INTEL_LAYOUT_WALK_3D :
-      (layout->format == XGL_FMT_S8_UINT) ? INTEL_LAYOUT_WALK_LOD :
+      (params->info->imageType == VK_IMAGE_3D) ? INTEL_LAYOUT_WALK_3D :
+      (layout->format == VK_FMT_S8_UINT) ? INTEL_LAYOUT_WALK_LOD :
       INTEL_LAYOUT_WALK_LAYER;
 
    /* GEN6 supports only interleaved samples */
@@ -674,8 +674,8 @@
 layout_init_size_and_format(struct intel_layout *layout,
                             struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
-   XGL_FORMAT format = info->format;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
+   VK_FORMAT format = info->format;
    bool require_separate_stencil = false;
 
    layout->width0 = info->extent.width;
@@ -689,7 +689,7 @@
     *
     * GEN7+ requires separate stencil buffers.
     */
-   if (info->usage & XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
+   if (info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_BIT) {
       if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7))
          require_separate_stencil = true;
       else
@@ -697,15 +697,15 @@
    }
 
    switch (format) {
-   case XGL_FMT_D24_UNORM_S8_UINT:
+   case VK_FMT_D24_UNORM_S8_UINT:
       if (require_separate_stencil) {
-         format = XGL_FMT_D24_UNORM;
+         format = VK_FMT_D24_UNORM;
          layout->separate_stencil = true;
       }
       break;
-   case XGL_FMT_D32_SFLOAT_S8_UINT:
+   case VK_FMT_D32_SFLOAT_S8_UINT:
       if (require_separate_stencil) {
-         format = XGL_FMT_D32_SFLOAT;
+         format = VK_FMT_D32_SFLOAT;
          layout->separate_stencil = true;
       }
       break;
@@ -725,15 +725,15 @@
 layout_want_mcs(struct intel_layout *layout,
                 struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    bool want_mcs = false;
 
    /* MCS is for RT on GEN7+ */
    if (intel_gpu_gen(params->gpu) < INTEL_GEN(7))
       return false;
 
-   if (info->imageType != XGL_IMAGE_2D ||
-       !(info->usage & XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
+   if (info->imageType != VK_IMAGE_2D ||
+       !(info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
       return false;
 
    /*
@@ -784,12 +784,12 @@
 layout_want_hiz(const struct intel_layout *layout,
                 const struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
 
    if (intel_debug & INTEL_DEBUG_NOHIZ)
        return false;
 
-   if (!(info->usage & XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT))
+   if (!(info->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_BIT))
       return false;
 
    if (!intel_format_has_depth(params->gpu, info->format))
@@ -819,7 +819,7 @@
 static void
 layout_align(struct intel_layout *layout, struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    int align_w = 1, align_h = 1, pad_h = 0;
 
    /*
@@ -844,14 +844,14 @@
     *      padding purposes. The value of 4 for j still applies for mip level
     *      alignment and QPitch calculation."
     */
-   if (info->usage & XGL_IMAGE_USAGE_SHADER_ACCESS_READ_BIT) {
+   if (info->usage & VK_IMAGE_USAGE_SHADER_ACCESS_READ_BIT) {
       if (align_w < layout->align_i)
           align_w = layout->align_i;
       if (align_h < layout->align_j)
           align_h = layout->align_j;
 
       /* in case it is used as a cube */
-      if (info->imageType == XGL_IMAGE_2D)
+      if (info->imageType == VK_IMAGE_2D)
          pad_h += 2;
 
       if (params->compressed && align_h < layout->align_j * 2)
@@ -864,7 +864,7 @@
     *     "If the surface contains an odd number of rows of data, a final row
     *      below the surface must be allocated."
     */
-   if ((info->usage & XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && align_h < 2)
+   if ((info->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && align_h < 2)
       align_h = 2;
 
    /*
@@ -911,7 +911,7 @@
        *      required above."
        */
       if (intel_gpu_gen(params->gpu) >= INTEL_GEN(7.5) &&
-          (params->info->usage & XGL_IMAGE_USAGE_SHADER_ACCESS_READ_BIT) &&
+          (params->info->usage & VK_IMAGE_USAGE_SHADER_ACCESS_READ_BIT) &&
           layout->tiling == GEN6_TILING_NONE)
          h += (64 + layout->bo_stride - 1) / layout->bo_stride;
 
@@ -1000,7 +1000,7 @@
 layout_calculate_hiz_size(struct intel_layout *layout,
                           struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    const unsigned hz_align_j = 8;
    enum intel_layout_walk_type hz_walk;
    unsigned hz_width, hz_height, lv;
@@ -1164,7 +1164,7 @@
 layout_calculate_mcs_size(struct intel_layout *layout,
                           struct intel_layout_params *params)
 {
-   const XGL_IMAGE_CREATE_INFO *info = params->info;
+   const VK_IMAGE_CREATE_INFO *info = params->info;
    int mcs_width, mcs_height, mcs_cpp;
    int downscale_x, downscale_y;
 
@@ -1292,7 +1292,7 @@
  */
 void intel_layout_init(struct intel_layout *layout,
                        struct intel_dev *dev,
-                       const XGL_IMAGE_CREATE_INFO *info,
+                       const VK_IMAGE_CREATE_INFO *info,
                        bool scanout)
 {
    struct intel_layout_params params;
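
Two reading aids for the hunks above: INTEL_GEN() is fixed-point, so
INTEL_GEN(7.5) is simply 750 and every gen check is an integer compare;
and layout_get_valid_tilings() works by intersecting constraint masks.
An illustrative trace (not driver code) for a VK_LINEAR_TILING request
on a VK_FMT_S8_UINT depth/stencil image:

    valid  = LAYOUT_TILING_ALL;
    valid &= LAYOUT_TILING_NONE;  /* info->tiling == VK_LINEAR_TILING */
    valid &= LAYOUT_TILING_W;     /* S8 stencil must be W-tiled */
    /* assuming the masks are disjoint bits, valid is now 0, i.e. no
     * tiling can satisfy both constraints at once */
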
diff --git a/icd/intel/layout.h b/icd/intel/layout.h
index f88feb1..0732117 100644
--- a/icd/intel/layout.h
+++ b/icd/intel/layout.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -84,7 +84,7 @@
    /* physical width0, height0, and format */
    unsigned width0;
    unsigned height0;
-   XGL_FORMAT format;
+   VK_FORMAT format;
    bool separate_stencil;
 
    /*
@@ -126,7 +126,7 @@
 
 void intel_layout_init(struct intel_layout *layout,
                        struct intel_dev *dev,
-                       const XGL_IMAGE_CREATE_INFO *info,
+                       const VK_IMAGE_CREATE_INFO *info,
                        bool scanout);
 
 /**
diff --git a/icd/intel/mem.c b/icd/intel/mem.c
index 7feddc3..833bf98 100644
--- a/icd/intel/mem.c
+++ b/icd/intel/mem.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -28,8 +28,8 @@
 #include "dev.h"
 #include "mem.h"
 
-XGL_RESULT intel_mem_alloc(struct intel_dev *dev,
-                           const XGL_MEMORY_ALLOC_INFO *info,
+VK_RESULT intel_mem_alloc(struct intel_dev *dev,
+                           const VK_MEMORY_ALLOC_INFO *info,
                            struct intel_mem **mem_ret)
 {
     struct intel_mem *mem;
@@ -37,22 +37,22 @@
     /* ignore any IMAGE_INFO and BUFFER_INFO usage: they don't alter allocations */
 
     mem = (struct intel_mem *) intel_base_create(&dev->base.handle,
-            sizeof(*mem), dev->base.dbg, XGL_DBG_OBJECT_GPU_MEMORY, info, 0);
+            sizeof(*mem), dev->base.dbg, VK_DBG_OBJECT_GPU_MEMORY, info, 0);
     if (!mem)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     mem->bo = intel_winsys_alloc_bo(dev->winsys,
-            "xgl-gpu-memory", info->allocationSize, 0);
+            "vk-gpu-memory", info->allocationSize, 0);
     if (!mem->bo) {
         intel_mem_free(mem);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
     mem->size = info->allocationSize;
 
     *mem_ret = mem;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_mem_free(struct intel_mem *mem)
@@ -62,7 +62,7 @@
     intel_base_destroy(&mem->base);
 }
 
-XGL_RESULT intel_mem_import_userptr(struct intel_dev *dev,
+VK_RESULT intel_mem_import_userptr(struct intel_dev *dev,
                                     const void *userptr,
                                     size_t size,
                                     struct intel_mem **mem_ret)
@@ -71,66 +71,66 @@
     struct intel_mem *mem;
 
     if ((uintptr_t) userptr % alignment || size % alignment)
-        return XGL_ERROR_INVALID_ALIGNMENT;
+        return VK_ERROR_INVALID_ALIGNMENT;
 
     mem = (struct intel_mem *) intel_base_create(&dev->base.handle,
-            sizeof(*mem), dev->base.dbg, XGL_DBG_OBJECT_GPU_MEMORY, NULL, 0);
+            sizeof(*mem), dev->base.dbg, VK_DBG_OBJECT_GPU_MEMORY, NULL, 0);
     if (!mem)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     mem->bo = intel_winsys_import_userptr(dev->winsys,
-            "xgl-gpu-memory-userptr", (void *) userptr, size, 0);
+            "vk-gpu-memory-userptr", (void *) userptr, size, 0);
     if (!mem->bo) {
         intel_mem_free(mem);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
     mem->size = size;
 
     *mem_ret = mem;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_mem_set_priority(struct intel_mem *mem,
-                                  XGL_MEMORY_PRIORITY priority)
+VK_RESULT intel_mem_set_priority(struct intel_mem *mem,
+                                  VK_MEMORY_PRIORITY priority)
 {
-    /* pin the bo when XGL_MEMORY_PRIORITY_VERY_HIGH? */
-    return XGL_SUCCESS;
+    /* pin the bo when VK_MEMORY_PRIORITY_VERY_HIGH? */
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglAllocMemory(
-    XGL_DEVICE                                  device,
-    const XGL_MEMORY_ALLOC_INFO*                pAllocInfo,
-    XGL_GPU_MEMORY*                             pMem)
+ICD_EXPORT VK_RESULT VKAPI vkAllocMemory(
+    VK_DEVICE                                  device,
+    const VK_MEMORY_ALLOC_INFO*                pAllocInfo,
+    VK_GPU_MEMORY*                             pMem)
 {
     struct intel_dev *dev = intel_dev(device);
 
     return intel_mem_alloc(dev, pAllocInfo, (struct intel_mem **) pMem);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglFreeMemory(
-    XGL_GPU_MEMORY                              mem_)
+ICD_EXPORT VK_RESULT VKAPI vkFreeMemory(
+    VK_GPU_MEMORY                              mem_)
 {
     struct intel_mem *mem = intel_mem(mem_);
 
     intel_mem_free(mem);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglSetMemoryPriority(
-    XGL_GPU_MEMORY                              mem_,
-    XGL_MEMORY_PRIORITY                         priority)
+ICD_EXPORT VK_RESULT VKAPI vkSetMemoryPriority(
+    VK_GPU_MEMORY                              mem_,
+    VK_MEMORY_PRIORITY                         priority)
 {
     struct intel_mem *mem = intel_mem(mem_);
 
     return intel_mem_set_priority(mem, priority);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglMapMemory(
-    XGL_GPU_MEMORY                              mem_,
-    XGL_FLAGS                                   flags,
+ICD_EXPORT VK_RESULT VKAPI vkMapMemory(
+    VK_GPU_MEMORY                              mem_,
+    VK_FLAGS                                   flags,
     void**                                      ppData)
 {
     struct intel_mem *mem = intel_mem(mem_);
@@ -138,24 +138,24 @@
 
     *ppData = ptr;
 
-    return (ptr) ? XGL_SUCCESS : XGL_ERROR_UNKNOWN;
+    return (ptr) ? VK_SUCCESS : VK_ERROR_UNKNOWN;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglUnmapMemory(
-    XGL_GPU_MEMORY                              mem_)
+ICD_EXPORT VK_RESULT VKAPI vkUnmapMemory(
+    VK_GPU_MEMORY                              mem_)
 {
     struct intel_mem *mem = intel_mem(mem_);
 
     intel_mem_unmap(mem);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglPinSystemMemory(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkPinSystemMemory(
+    VK_DEVICE                                  device,
     const void*                                 pSysMem,
     size_t                                      memSize,
-    XGL_GPU_MEMORY*                             pMem)
+    VK_GPU_MEMORY*                             pMem)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -163,18 +163,18 @@
             (struct intel_mem **) pMem);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglOpenSharedMemory(
-    XGL_DEVICE                                  device,
-    const XGL_MEMORY_OPEN_INFO*                 pOpenInfo,
-    XGL_GPU_MEMORY*                             pMem)
+ICD_EXPORT VK_RESULT VKAPI vkOpenSharedMemory(
+    VK_DEVICE                                  device,
+    const VK_MEMORY_OPEN_INFO*                 pOpenInfo,
+    VK_GPU_MEMORY*                             pMem)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglOpenPeerMemory(
-    XGL_DEVICE                                  device,
-    const XGL_PEER_MEMORY_OPEN_INFO*            pOpenInfo,
-    XGL_GPU_MEMORY*                             pMem)
+ICD_EXPORT VK_RESULT VKAPI vkOpenPeerMemory(
+    VK_DEVICE                                  device,
+    const VK_PEER_MEMORY_OPEN_INFO*            pOpenInfo,
+    VK_GPU_MEMORY*                             pMem)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
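
The whole allocation path exposed by this file, as a sketch; the sType
and pNext fields of the alloc info are elided and error handling is
minimal:

    VK_MEMORY_ALLOC_INFO info = { .allocationSize = 4096 };
    VK_GPU_MEMORY mem;
    void *ptr;

    if (vkAllocMemory(device, &info, &mem) != VK_SUCCESS)
        return;
    if (vkMapMemory(mem, 0, &ptr) == VK_SUCCESS) {
        memset(ptr, 0, 4096);  /* CPU writes through the mapping */
        vkUnmapMemory(mem);
    }
    vkFreeMemory(mem);
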
diff --git a/icd/intel/mem.h b/icd/intel/mem.h
index 47639b2..f81468c 100644
--- a/icd/intel/mem.h
+++ b/icd/intel/mem.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -36,23 +36,23 @@
     struct intel_base base;
 
     struct intel_bo *bo;
-    XGL_GPU_SIZE size;
+    VK_GPU_SIZE size;
 };
 
-XGL_RESULT intel_mem_alloc(struct intel_dev *dev,
-                           const XGL_MEMORY_ALLOC_INFO *info,
+VK_RESULT intel_mem_alloc(struct intel_dev *dev,
+                           const VK_MEMORY_ALLOC_INFO *info,
                            struct intel_mem **mem_ret);
 void intel_mem_free(struct intel_mem *mem);
 
-XGL_RESULT intel_mem_import_userptr(struct intel_dev *dev,
+VK_RESULT intel_mem_import_userptr(struct intel_dev *dev,
                                     const void *userptr,
                                     size_t size,
                                     struct intel_mem **mem_ret);
 
-XGL_RESULT intel_mem_set_priority(struct intel_mem *mem,
-                                  XGL_MEMORY_PRIORITY priority);
+VK_RESULT intel_mem_set_priority(struct intel_mem *mem,
+                                  VK_MEMORY_PRIORITY priority);
 
-static inline void *intel_mem_map(struct intel_mem *mem, XGL_FLAGS flags)
+static inline void *intel_mem_map(struct intel_mem *mem, VK_FLAGS flags)
 {
     return intel_bo_map_async(mem->bo);
 }
@@ -72,7 +72,7 @@
     return intel_bo_is_busy(mem->bo);
 }
 
-static inline struct intel_mem *intel_mem(XGL_GPU_MEMORY mem)
+static inline struct intel_mem *intel_mem(VK_GPU_MEMORY mem)
 {
     return (struct intel_mem *) mem;
 }
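
Note that intel_mem_map() ignores its flags argument and maps the bo
asynchronously, while the last hunk exposes intel_bo_is_busy() through
an inline whose name is cut off (inferred here as intel_mem_is_busy()).
A cautious caller might poll before touching the mapping; this is a
sketch of intent, not a documented contract:

    if (!intel_mem_is_busy(mem)) {          /* inferred helper name */
        void *ptr = intel_mem_map(mem, 0);  /* flags are unused today */
        /* ... CPU access ... */
        intel_mem_unmap(mem);
    }
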
diff --git a/icd/intel/obj.c b/icd/intel/obj.c
index edb04e4..26a5dbd 100644
--- a/icd/intel/obj.c
+++ b/icd/intel/obj.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -30,48 +30,48 @@
 #include "mem.h"
 #include "obj.h"
 
-XGL_RESULT intel_base_get_info(struct intel_base *base, int type,
+VK_RESULT intel_base_get_info(struct intel_base *base, int type,
                                size_t *size, void *data)
 {
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
     size_t s;
     uint32_t *count;
 
     switch (type) {
-    case XGL_INFO_TYPE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
         {
-            XGL_MEMORY_REQUIREMENTS *mem_req = data;
-            s = sizeof(XGL_MEMORY_REQUIREMENTS);
+            VK_MEMORY_REQUIREMENTS *mem_req = data;
+            s = sizeof(VK_MEMORY_REQUIREMENTS);
             *size = s;
             if (data == NULL)
                 return ret;
             memset(data, 0, s);
-            mem_req->memType =  XGL_MEMORY_TYPE_OTHER;
+            mem_req->memType = VK_MEMORY_TYPE_OTHER;
             break;
         }
-    case XGL_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
+    case VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
         *size = sizeof(uint32_t);
         if (data == NULL)
             return ret;
         count = (uint32_t *) data;
         *count = 1;
         break;
-    case XGL_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS:
-        s = sizeof(XGL_IMAGE_MEMORY_REQUIREMENTS);
+    case VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS:
+        s = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
         *size = s;
         if (data == NULL)
             return ret;
         memset(data, 0, s);
         break;
-    case XGL_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
-        s = sizeof(XGL_BUFFER_MEMORY_REQUIREMENTS);
+    case VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
+        s = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
         *size = s;
         if (data == NULL)
             return ret;
         memset(data, 0, s);
         break;
     default:
-        ret = XGL_ERROR_INVALID_VALUE;
+        ret = VK_ERROR_INVALID_VALUE;
         break;
     }
 
@@ -85,7 +85,7 @@
     const union {
         const void *ptr;
         const struct {
-            XGL_STRUCTURE_TYPE struct_type;
+            VK_STRUCTURE_TYPE struct_type;
             void *next;
         } *header;
     } info = { .ptr = create_info };
@@ -95,98 +95,98 @@
         return true;
 
     switch (dbg->type) {
-    case XGL_DBG_OBJECT_DEVICE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
+    case VK_DBG_OBJECT_DEVICE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_GPU_MEMORY:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
+    case VK_DBG_OBJECT_GPU_MEMORY:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
         break;
-    case XGL_DBG_OBJECT_EVENT:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_EVENT_CREATE_INFO);
-        shallow_copy = sizeof(XGL_EVENT_CREATE_INFO);
+    case VK_DBG_OBJECT_EVENT:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+        shallow_copy = sizeof(VK_EVENT_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_FENCE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_FENCE_CREATE_INFO);
-        shallow_copy = sizeof(XGL_FENCE_CREATE_INFO);
+    case VK_DBG_OBJECT_FENCE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
+        shallow_copy = sizeof(VK_FENCE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_QUERY_POOL:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
-        shallow_copy = sizeof(XGL_QUERY_POOL_CREATE_INFO);
+    case VK_DBG_OBJECT_QUERY_POOL:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
+        shallow_copy = sizeof(VK_QUERY_POOL_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_BUFFER:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
-        shallow_copy = sizeof(XGL_BUFFER_CREATE_INFO);
+    case VK_DBG_OBJECT_BUFFER:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
+        shallow_copy = sizeof(VK_BUFFER_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_BUFFER_VIEW:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
-        shallow_copy = sizeof(XGL_BUFFER_VIEW_CREATE_INFO);
+    case VK_DBG_OBJECT_BUFFER_VIEW:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
+        shallow_copy = sizeof(VK_BUFFER_VIEW_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_IMAGE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
-        shallow_copy = sizeof(XGL_IMAGE_CREATE_INFO);
+    case VK_DBG_OBJECT_IMAGE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
+        shallow_copy = sizeof(VK_IMAGE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_IMAGE_VIEW:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
-        shallow_copy = sizeof(XGL_IMAGE_VIEW_CREATE_INFO);
+    case VK_DBG_OBJECT_IMAGE_VIEW:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
+        shallow_copy = sizeof(VK_IMAGE_VIEW_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_COLOR_TARGET_VIEW:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO);
-        shallow_copy = sizeof(XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO);
+    case VK_DBG_OBJECT_COLOR_TARGET_VIEW:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO);
+        shallow_copy = sizeof(VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO);
-        shallow_copy = sizeof(XGL_DEPTH_STENCIL_VIEW_CREATE_INFO);
+    case VK_DBG_OBJECT_DEPTH_STENCIL_VIEW:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO);
+        shallow_copy = sizeof(VK_DEPTH_STENCIL_VIEW_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_SAMPLER:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
-        shallow_copy = sizeof(XGL_SAMPLER_CREATE_INFO);
+    case VK_DBG_OBJECT_SAMPLER:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
+        shallow_copy = sizeof(VK_SAMPLER_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_DESCRIPTOR_SET:
+    case VK_DBG_OBJECT_DESCRIPTOR_SET:
         /* no create info */
         break;
-    case XGL_DBG_OBJECT_VIEWPORT_STATE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);
-        shallow_copy = sizeof(XGL_DYNAMIC_VP_STATE_CREATE_INFO);
+    case VK_DBG_OBJECT_VIEWPORT_STATE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);
+        shallow_copy = sizeof(VK_DYNAMIC_VP_STATE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_RASTER_STATE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);
-        shallow_copy = sizeof(XGL_DYNAMIC_RS_STATE_CREATE_INFO);
+    case VK_DBG_OBJECT_RASTER_STATE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);
+        shallow_copy = sizeof(VK_DYNAMIC_RS_STATE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_COLOR_BLEND_STATE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);
-        shallow_copy = sizeof(XGL_DYNAMIC_CB_STATE_CREATE_INFO);
+    case VK_DBG_OBJECT_COLOR_BLEND_STATE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);
+        shallow_copy = sizeof(VK_DYNAMIC_CB_STATE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_DEPTH_STENCIL_STATE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO);
-        shallow_copy = sizeof(XGL_DYNAMIC_DS_STATE_CREATE_INFO);
+    case VK_DBG_OBJECT_DEPTH_STENCIL_STATE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO);
+        shallow_copy = sizeof(VK_DYNAMIC_DS_STATE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_CMD_BUFFER:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO);
-        shallow_copy = sizeof(XGL_CMD_BUFFER_CREATE_INFO);
+    case VK_DBG_OBJECT_CMD_BUFFER:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO);
+        shallow_copy = sizeof(VK_CMD_BUFFER_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_GRAPHICS_PIPELINE:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
+    case VK_DBG_OBJECT_GRAPHICS_PIPELINE:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_SHADER:
-        assert(info.header->struct_type == XGL_STRUCTURE_TYPE_SHADER_CREATE_INFO);
-        shallow_copy = sizeof(XGL_SHADER_CREATE_INFO);
+    case VK_DBG_OBJECT_SHADER:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
+        shallow_copy = sizeof(VK_SHADER_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_FRAMEBUFFER:
-        assert(info.header->struct_type ==  XGL_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
-        shallow_copy = sizeof(XGL_FRAMEBUFFER_CREATE_INFO);
+    case VK_DBG_OBJECT_FRAMEBUFFER:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
+        shallow_copy = sizeof(VK_FRAMEBUFFER_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_RENDER_PASS:
-        assert(info.header->struct_type ==  XGL_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
-        shallow_copy = sizeof(XGL_RENDER_PASS_CREATE_INFO);
+    case VK_DBG_OBJECT_RENDER_PASS:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
+        shallow_copy = sizeof(VK_RENDER_PASS_CREATE_INFO);
         break;
-    case XGL_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT:
-        assert(info.header->struct_type ==  XGL_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
+    case VK_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
         /* TODO */
-        shallow_copy = sizeof(XGL_DESCRIPTOR_SET_LAYOUT_CREATE_INFO) * 0;
+        shallow_copy = sizeof(VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO) * 0;
         break;
-    case XGL_DBG_OBJECT_DESCRIPTOR_POOL:
-        assert(info.header->struct_type ==  XGL_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO);
-        shallow_copy = sizeof(XGL_DESCRIPTOR_POOL_CREATE_INFO);
+    case VK_DBG_OBJECT_DESCRIPTOR_POOL:
+        assert(info.header->struct_type == VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO);
+        shallow_copy = sizeof(VK_DESCRIPTOR_POOL_CREATE_INFO);
         break;
     default:
         assert(!"unknown dbg object type");
@@ -196,36 +196,36 @@
 
     if (shallow_copy) {
         dbg->create_info = intel_alloc(handle, shallow_copy, 0,
-                XGL_SYSTEM_ALLOC_DEBUG);
+                VK_SYSTEM_ALLOC_DEBUG);
         if (!dbg->create_info)
             return false;
 
         memcpy(dbg->create_info, create_info, shallow_copy);
         dbg->create_info_size = shallow_copy;
     } else if (info.header->struct_type ==
-            XGL_STRUCTURE_TYPE_MEMORY_ALLOC_INFO) {
+            VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO) {
         size_t size;
-        const XGL_MEMORY_ALLOC_INFO *ptr_next, *src = info.ptr;
-        XGL_MEMORY_ALLOC_INFO *dst;
+        const VK_MEMORY_ALLOC_INFO *ptr_next, *src = info.ptr;
+        VK_MEMORY_ALLOC_INFO *dst;
         uint8_t *d;
         size = sizeof(*src);
 
         ptr_next = src->pNext;
         while (ptr_next != NULL) {
             switch (ptr_next->sType) {
-                case XGL_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO:
-                    size += sizeof(XGL_MEMORY_ALLOC_IMAGE_INFO);
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO:
+                    size += sizeof(VK_MEMORY_ALLOC_IMAGE_INFO);
                     break;
-                case XGL_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO:
-                    size += sizeof(XGL_MEMORY_ALLOC_BUFFER_INFO);
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO:
+                    size += sizeof(VK_MEMORY_ALLOC_BUFFER_INFO);
                     break;
                 default:
                     return false;
             }
-            ptr_next = (XGL_MEMORY_ALLOC_INFO *) ptr_next->pNext;
+            ptr_next = (VK_MEMORY_ALLOC_INFO *) ptr_next->pNext;
         }
         dbg->create_info_size = size;
-        dst = intel_alloc(handle, size, 0, XGL_SYSTEM_ALLOC_DEBUG);
+        dst = intel_alloc(handle, size, 0, VK_SYSTEM_ALLOC_DEBUG);
         if (!dst)
             return false;
         memcpy(dst, src, sizeof(*src));
@@ -235,24 +235,24 @@
         d += sizeof(*src);
         while (ptr_next != NULL) {
             switch (ptr_next->sType) {
-            case XGL_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO:
-                memcpy(d, ptr_next, sizeof(XGL_MEMORY_ALLOC_IMAGE_INFO));
-                d += sizeof(XGL_MEMORY_ALLOC_IMAGE_INFO);
+            case VK_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO:
+                memcpy(d, ptr_next, sizeof(VK_MEMORY_ALLOC_IMAGE_INFO));
+                d += sizeof(VK_MEMORY_ALLOC_IMAGE_INFO);
                 break;
-            case XGL_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO:
-                memcpy(d, ptr_next, sizeof(XGL_MEMORY_ALLOC_BUFFER_INFO));
-                d += sizeof(XGL_MEMORY_ALLOC_BUFFER_INFO);
+            case VK_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO:
+                memcpy(d, ptr_next, sizeof(VK_MEMORY_ALLOC_BUFFER_INFO));
+                d += sizeof(VK_MEMORY_ALLOC_BUFFER_INFO);
                 break;
             default:
                 return false;
             }
-            ptr_next = (XGL_MEMORY_ALLOC_INFO *) ptr_next->pNext;
+            ptr_next = (VK_MEMORY_ALLOC_INFO *) ptr_next->pNext;
         }
         dbg->create_info = dst;
     } else if (info.header->struct_type ==
-            XGL_STRUCTURE_TYPE_DEVICE_CREATE_INFO) {
-        const XGL_DEVICE_CREATE_INFO *src = info.ptr;
-        XGL_DEVICE_CREATE_INFO *dst;
+            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO) {
+        const VK_DEVICE_CREATE_INFO *src = info.ptr;
+        VK_DEVICE_CREATE_INFO *dst;
         uint8_t *d;
         size_t size;
         uint32_t i;
@@ -266,7 +266,7 @@
             size += 1 + strlen(src->ppEnabledExtensionNames[i]);
         }
 
-        dst = intel_alloc(handle, size, 0, XGL_SYSTEM_ALLOC_DEBUG);
+        dst = intel_alloc(handle, size, 0, VK_SYSTEM_ALLOC_DEBUG);
         if (!dst)
             return false;
 
@@ -277,7 +277,7 @@
 
         size = sizeof(src->pRequestedQueues[0]) * src->queueRecordCount;
         memcpy(d, src->pRequestedQueues, size);
-        dst->pRequestedQueues = (const XGL_DEVICE_QUEUE_CREATE_INFO *) d;
+        dst->pRequestedQueues = (const VK_DEVICE_QUEUE_CREATE_INFO *) d;
         d += size;
 
         size = sizeof(src->ppEnabledExtensionNames[0]) * src->extensionCount;
@@ -292,7 +292,7 @@
             size += len + 1;
         }
         dbg->create_info = dst;
-    } else if (info.header->struct_type == XGL_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO) {
+    } else if (info.header->struct_type == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO) {
         // TODO: What do we want to copy here?
     }
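
Note: the VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO branch above makes two passes
over the pNext chain -- one to size the flattened copy, one to memcpy each
extension struct behind the base struct. A minimal sketch of that pattern,
with hypothetical stand-ins (chain_header, sizeof_by_stype) for the real
structs and the per-sType switch:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct chain_header { int sType; const struct chain_header *pNext; };

    static size_t sizeof_by_stype(int sType)
    {
        (void) sType;
        return 32;              /* placeholder for the per-sType switch above */
    }

    static void *chain_flatten(const struct chain_header *src, size_t *out_size)
    {
        const struct chain_header *p;
        uint8_t *dst, *d;
        size_t size = 0;

        for (p = src; p != NULL; p = p->pNext)          /* pass 1: total size */
            size += sizeof_by_stype(p->sType);

        dst = malloc(size);
        if (dst == NULL)
            return NULL;

        for (p = src, d = dst; p != NULL; p = p->pNext) { /* pass 2: copy each */
            memcpy(d, p, sizeof_by_stype(p->sType));
            d += sizeof_by_stype(p->sType);
        }

        *out_size = size;
        return dst;
    }

As in the driver code, the pNext pointers embedded in the copy still reference
the caller's original structs; the flattened block exists only for debug
inspection, not for re-walking.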
 
@@ -304,7 +304,7 @@
  * size is allocated and zeroed.
  */
 struct intel_base_dbg *intel_base_dbg_create(const struct intel_handle *handle,
-                                             XGL_DBG_OBJECT_TYPE type,
+                                             VK_DBG_OBJECT_TYPE type,
                                              const void *create_info,
                                              size_t dbg_size)
 {
@@ -315,7 +315,7 @@
 
     assert(dbg_size >= sizeof(*dbg));
 
-    dbg = intel_alloc(handle, dbg_size, 0, XGL_SYSTEM_ALLOC_DEBUG);
+    dbg = intel_alloc(handle, dbg_size, 0, VK_SYSTEM_ALLOC_DEBUG);
     if (!dbg)
         return NULL;
 
@@ -349,7 +349,7 @@
  */
 struct intel_base *intel_base_create(const struct intel_handle *handle,
                                      size_t obj_size, bool debug,
-                                     XGL_DBG_OBJECT_TYPE type,
+                                     VK_DBG_OBJECT_TYPE type,
                                      const void *create_info,
                                      size_t dbg_size)
 {
@@ -360,7 +360,7 @@
 
     assert(obj_size >= sizeof(*base));
 
-    base = intel_alloc(handle, obj_size, 0, XGL_SYSTEM_ALLOC_API_OBJECT);
+    base = intel_alloc(handle, obj_size, 0, VK_SYSTEM_ALLOC_API_OBJECT);
     if (!base)
         return NULL;
 
@@ -388,19 +388,19 @@
     intel_free(base, base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDestroyObject(
-    XGL_OBJECT                                  object)
+ICD_EXPORT VK_RESULT VKAPI vkDestroyObject(
+    VK_OBJECT                                   object)
 {
     struct intel_obj *obj = intel_obj(object);
 
     obj->destroy(obj);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetObjectInfo(
-    XGL_BASE_OBJECT                             object,
-    XGL_OBJECT_INFO_TYPE                        infoType,
+ICD_EXPORT VK_RESULT VKAPI vkGetObjectInfo(
+    VK_BASE_OBJECT                              object,
+    VK_OBJECT_INFO_TYPE                         infoType,
     size_t*                                     pDataSize,
     void*                                       pData)
 {
@@ -409,43 +409,43 @@
     return base->get_info(base, infoType, pDataSize, pData);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglBindObjectMemory(
-    XGL_OBJECT                                  object,
+ICD_EXPORT VK_RESULT VKAPI vkBindObjectMemory(
+    VK_OBJECT                                   object,
     uint32_t                                    allocationIdx,
-    XGL_GPU_MEMORY                              mem_,
-    XGL_GPU_SIZE                                memOffset)
+    VK_GPU_MEMORY                               mem_,
+    VK_GPU_SIZE                                 memOffset)
 {
     struct intel_obj *obj = intel_obj(object);
     struct intel_mem *mem = intel_mem(mem_);
 
     intel_obj_bind_mem(obj, mem, memOffset);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglBindObjectMemoryRange(
-    XGL_OBJECT                                  object,
+ICD_EXPORT VK_RESULT VKAPI vkBindObjectMemoryRange(
+    VK_OBJECT                                   object,
     uint32_t                                    allocationIdx,
-    XGL_GPU_SIZE                                rangeOffset,
-    XGL_GPU_SIZE                                rangeSize,
-    XGL_GPU_MEMORY                              mem,
-    XGL_GPU_SIZE                                memOffset)
+    VK_GPU_SIZE                                 rangeOffset,
+    VK_GPU_SIZE                                 rangeSize,
+    VK_GPU_MEMORY                               mem,
+    VK_GPU_SIZE                                 memOffset)
 {
-    return XGL_ERROR_UNKNOWN;
+    return VK_ERROR_UNKNOWN;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglBindImageMemoryRange(
-    XGL_IMAGE                                   image,
+ICD_EXPORT VK_RESULT VKAPI vkBindImageMemoryRange(
+    VK_IMAGE                                    image,
     uint32_t                                    allocationIdx,
-    const XGL_IMAGE_MEMORY_BIND_INFO*           bindInfo,
-    XGL_GPU_MEMORY                              mem,
-    XGL_GPU_SIZE                                memOffset)
+    const VK_IMAGE_MEMORY_BIND_INFO*            bindInfo,
+    VK_GPU_MEMORY                               mem,
+    VK_GPU_SIZE                                 memOffset)
 {
-    return XGL_ERROR_UNKNOWN;
+    return VK_ERROR_UNKNOWN;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetObjectTag(
-    XGL_BASE_OBJECT                             object,
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetObjectTag(
+    VK_BASE_OBJECT                              object,
     size_t                                      tagSize,
     const void*                                 pTag)
 {
@@ -454,11 +454,11 @@
     void *tag;
 
     if (!dbg)
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 
-    tag = intel_alloc(base, tagSize, 0, XGL_SYSTEM_ALLOC_DEBUG);
+    tag = intel_alloc(base, tagSize, 0, VK_SYSTEM_ALLOC_DEBUG);
     if (!tag)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     memcpy(tag, pTag, tagSize);
 
@@ -468,5 +468,5 @@
     dbg->tag = tag;
     dbg->tag_size = tagSize;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
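
Note: every get_info handler in this series (buf_get_info earlier, the
pipeline and query variants below) writes *pDataSize first and returns early
when pData is NULL, so callers can size their buffer before asking for the
data. A usage sketch against the renamed entry point (object is assumed to be
a valid handle; names follow this interim VK API):

    VK_MEMORY_REQUIREMENTS mem_req;
    size_t size = 0;
    VK_RESULT ret;

    /* First call: pData == NULL, the driver only reports the size. */
    ret = vkGetObjectInfo(object, VK_INFO_TYPE_MEMORY_REQUIREMENTS, &size, NULL);

    /* Second call: fetch the data itself. */
    if (ret == VK_SUCCESS && size == sizeof(mem_req))
        ret = vkGetObjectInfo(object, VK_INFO_TYPE_MEMORY_REQUIREMENTS,
                              &size, &mem_req);
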
diff --git a/icd/intel/obj.h b/icd/intel/obj.h
index ee38a9d..b178698 100644
--- a/icd/intel/obj.h
+++ b/icd/intel/obj.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -34,7 +34,7 @@
 struct intel_mem;
 
 struct intel_base_dbg {
-    XGL_DBG_OBJECT_TYPE type;
+    VK_DBG_OBJECT_TYPE type;
 
     void *create_info;
     size_t create_info_size;
@@ -48,7 +48,7 @@
 
     struct intel_base_dbg *dbg;
 
-    XGL_RESULT (*get_info)(struct intel_base *base, int type,
+    VK_RESULT (*get_info)(struct intel_base *base, int type,
                            size_t *size, void *data);
 };
 
@@ -62,29 +62,29 @@
     size_t offset;
 };
 
-static inline struct intel_base *intel_base(XGL_BASE_OBJECT base)
+static inline struct intel_base *intel_base(VK_BASE_OBJECT base)
 {
     return (struct intel_base *) base;
 }
 
-static inline struct intel_obj *intel_obj(XGL_OBJECT obj)
+static inline struct intel_obj *intel_obj(VK_OBJECT obj)
 {
     return (struct intel_obj *) obj;
 }
 
 static inline void intel_obj_bind_mem(struct intel_obj *obj,
                                       struct intel_mem *mem,
-                                      XGL_GPU_SIZE offset)
+                                      VK_GPU_SIZE offset)
 {
     obj->mem = mem;
     obj->offset = offset;
 }
 
-XGL_RESULT intel_base_get_info(struct intel_base *base, int type,
+VK_RESULT intel_base_get_info(struct intel_base *base, int type,
                                size_t *size, void *data);
 
 struct intel_base_dbg *intel_base_dbg_create(const struct intel_handle *handle,
-                                             XGL_DBG_OBJECT_TYPE type,
+                                             VK_DBG_OBJECT_TYPE type,
                                              const void *create_info,
                                              size_t dbg_size);
 void intel_base_dbg_destroy(const struct intel_handle *handle,
@@ -92,7 +92,7 @@
 
 struct intel_base *intel_base_create(const struct intel_handle *handle,
                                      size_t obj_size, bool debug,
-                                     XGL_DBG_OBJECT_TYPE type,
+                                     VK_DBG_OBJECT_TYPE type,
                                      const void *create_info,
                                      size_t dbg_size);
 void intel_base_destroy(struct intel_base *base);
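
Note: the intel_base()/intel_obj() casts above (and the analogous
intel_pipeline() cast in pipeline.h below) are safe only because each driver
object embeds its base struct as the first member, so the opaque handle value
is simultaneously a valid pointer to the derived type and to the base. A
layout sketch, assumed from the driver's pattern rather than quoted from
buf.h:

    struct intel_buf {
        struct intel_obj obj;   /* must be first: a VK_OBJECT points here */

        VK_GPU_SIZE size;
        VK_FLAGS usage;
    };

    static inline struct intel_buf *intel_buf_from_obj(struct intel_obj *obj)
    {
        /* valid because obj is the first member of intel_buf */
        return (struct intel_buf *) obj;
    }
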
diff --git a/icd/intel/pipeline.c b/icd/intel/pipeline.c
index f1bde06..6564c2b 100644
--- a/icd/intel/pipeline.c
+++ b/icd/intel/pipeline.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -33,76 +33,76 @@
 #include "shader.h"
 #include "pipeline.h"
 
-static int translate_blend_func(XGL_BLEND_FUNC func)
+static int translate_blend_func(VK_BLEND_FUNC func)
 {
    switch (func) {
-   case XGL_BLEND_FUNC_ADD:                return GEN6_BLENDFUNCTION_ADD;
-   case XGL_BLEND_FUNC_SUBTRACT:           return GEN6_BLENDFUNCTION_SUBTRACT;
-   case XGL_BLEND_FUNC_REVERSE_SUBTRACT:   return GEN6_BLENDFUNCTION_REVERSE_SUBTRACT;
-   case XGL_BLEND_FUNC_MIN:                return GEN6_BLENDFUNCTION_MIN;
-   case XGL_BLEND_FUNC_MAX:                return GEN6_BLENDFUNCTION_MAX;
+   case VK_BLEND_FUNC_ADD:                return GEN6_BLENDFUNCTION_ADD;
+   case VK_BLEND_FUNC_SUBTRACT:           return GEN6_BLENDFUNCTION_SUBTRACT;
+   case VK_BLEND_FUNC_REVERSE_SUBTRACT:   return GEN6_BLENDFUNCTION_REVERSE_SUBTRACT;
+   case VK_BLEND_FUNC_MIN:                return GEN6_BLENDFUNCTION_MIN;
+   case VK_BLEND_FUNC_MAX:                return GEN6_BLENDFUNCTION_MAX;
    default:
       assert(!"unknown blend func");
       return GEN6_BLENDFUNCTION_ADD;
    };
 }
 
-static int translate_blend(XGL_BLEND blend)
+static int translate_blend(VK_BLEND blend)
 {
    switch (blend) {
-   case XGL_BLEND_ZERO:                     return GEN6_BLENDFACTOR_ZERO;
-   case XGL_BLEND_ONE:                      return GEN6_BLENDFACTOR_ONE;
-   case XGL_BLEND_SRC_COLOR:                return GEN6_BLENDFACTOR_SRC_COLOR;
-   case XGL_BLEND_ONE_MINUS_SRC_COLOR:      return GEN6_BLENDFACTOR_INV_SRC_COLOR;
-   case XGL_BLEND_DEST_COLOR:               return GEN6_BLENDFACTOR_DST_COLOR;
-   case XGL_BLEND_ONE_MINUS_DEST_COLOR:     return GEN6_BLENDFACTOR_INV_DST_COLOR;
-   case XGL_BLEND_SRC_ALPHA:                return GEN6_BLENDFACTOR_SRC_ALPHA;
-   case XGL_BLEND_ONE_MINUS_SRC_ALPHA:      return GEN6_BLENDFACTOR_INV_SRC_ALPHA;
-   case XGL_BLEND_DEST_ALPHA:               return GEN6_BLENDFACTOR_DST_ALPHA;
-   case XGL_BLEND_ONE_MINUS_DEST_ALPHA:     return GEN6_BLENDFACTOR_INV_DST_ALPHA;
-   case XGL_BLEND_CONSTANT_COLOR:           return GEN6_BLENDFACTOR_CONST_COLOR;
-   case XGL_BLEND_ONE_MINUS_CONSTANT_COLOR: return GEN6_BLENDFACTOR_INV_CONST_COLOR;
-   case XGL_BLEND_CONSTANT_ALPHA:           return GEN6_BLENDFACTOR_CONST_ALPHA;
-   case XGL_BLEND_ONE_MINUS_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_INV_CONST_ALPHA;
-   case XGL_BLEND_SRC_ALPHA_SATURATE:       return GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE;
-   case XGL_BLEND_SRC1_COLOR:               return GEN6_BLENDFACTOR_SRC1_COLOR;
-   case XGL_BLEND_ONE_MINUS_SRC1_COLOR:     return GEN6_BLENDFACTOR_INV_SRC1_COLOR;
-   case XGL_BLEND_SRC1_ALPHA:               return GEN6_BLENDFACTOR_SRC1_ALPHA;
-   case XGL_BLEND_ONE_MINUS_SRC1_ALPHA:     return GEN6_BLENDFACTOR_INV_SRC1_ALPHA;
+   case VK_BLEND_ZERO:                     return GEN6_BLENDFACTOR_ZERO;
+   case VK_BLEND_ONE:                      return GEN6_BLENDFACTOR_ONE;
+   case VK_BLEND_SRC_COLOR:                return GEN6_BLENDFACTOR_SRC_COLOR;
+   case VK_BLEND_ONE_MINUS_SRC_COLOR:      return GEN6_BLENDFACTOR_INV_SRC_COLOR;
+   case VK_BLEND_DEST_COLOR:               return GEN6_BLENDFACTOR_DST_COLOR;
+   case VK_BLEND_ONE_MINUS_DEST_COLOR:     return GEN6_BLENDFACTOR_INV_DST_COLOR;
+   case VK_BLEND_SRC_ALPHA:                return GEN6_BLENDFACTOR_SRC_ALPHA;
+   case VK_BLEND_ONE_MINUS_SRC_ALPHA:      return GEN6_BLENDFACTOR_INV_SRC_ALPHA;
+   case VK_BLEND_DEST_ALPHA:               return GEN6_BLENDFACTOR_DST_ALPHA;
+   case VK_BLEND_ONE_MINUS_DEST_ALPHA:     return GEN6_BLENDFACTOR_INV_DST_ALPHA;
+   case VK_BLEND_CONSTANT_COLOR:           return GEN6_BLENDFACTOR_CONST_COLOR;
+   case VK_BLEND_ONE_MINUS_CONSTANT_COLOR: return GEN6_BLENDFACTOR_INV_CONST_COLOR;
+   case VK_BLEND_CONSTANT_ALPHA:           return GEN6_BLENDFACTOR_CONST_ALPHA;
+   case VK_BLEND_ONE_MINUS_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_INV_CONST_ALPHA;
+   case VK_BLEND_SRC_ALPHA_SATURATE:       return GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE;
+   case VK_BLEND_SRC1_COLOR:               return GEN6_BLENDFACTOR_SRC1_COLOR;
+   case VK_BLEND_ONE_MINUS_SRC1_COLOR:     return GEN6_BLENDFACTOR_INV_SRC1_COLOR;
+   case VK_BLEND_SRC1_ALPHA:               return GEN6_BLENDFACTOR_SRC1_ALPHA;
+   case VK_BLEND_ONE_MINUS_SRC1_ALPHA:     return GEN6_BLENDFACTOR_INV_SRC1_ALPHA;
    default:
       assert(!"unknown blend factor");
       return GEN6_BLENDFACTOR_ONE;
    };
 }
 
-static int translate_compare_func(XGL_COMPARE_FUNC func)
+static int translate_compare_func(VK_COMPARE_FUNC func)
 {
     switch (func) {
-    case XGL_COMPARE_NEVER:         return GEN6_COMPAREFUNCTION_NEVER;
-    case XGL_COMPARE_LESS:          return GEN6_COMPAREFUNCTION_LESS;
-    case XGL_COMPARE_EQUAL:         return GEN6_COMPAREFUNCTION_EQUAL;
-    case XGL_COMPARE_LESS_EQUAL:    return GEN6_COMPAREFUNCTION_LEQUAL;
-    case XGL_COMPARE_GREATER:       return GEN6_COMPAREFUNCTION_GREATER;
-    case XGL_COMPARE_NOT_EQUAL:     return GEN6_COMPAREFUNCTION_NOTEQUAL;
-    case XGL_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
-    case XGL_COMPARE_ALWAYS:        return GEN6_COMPAREFUNCTION_ALWAYS;
+    case VK_COMPARE_NEVER:         return GEN6_COMPAREFUNCTION_NEVER;
+    case VK_COMPARE_LESS:          return GEN6_COMPAREFUNCTION_LESS;
+    case VK_COMPARE_EQUAL:         return GEN6_COMPAREFUNCTION_EQUAL;
+    case VK_COMPARE_LESS_EQUAL:    return GEN6_COMPAREFUNCTION_LEQUAL;
+    case VK_COMPARE_GREATER:       return GEN6_COMPAREFUNCTION_GREATER;
+    case VK_COMPARE_NOT_EQUAL:     return GEN6_COMPAREFUNCTION_NOTEQUAL;
+    case VK_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
+    case VK_COMPARE_ALWAYS:        return GEN6_COMPAREFUNCTION_ALWAYS;
     default:
       assert(!"unknown compare_func");
       return GEN6_COMPAREFUNCTION_NEVER;
     }
 }
 
-static int translate_stencil_op(XGL_STENCIL_OP op)
+static int translate_stencil_op(VK_STENCIL_OP op)
 {
     switch (op) {
-    case XGL_STENCIL_OP_KEEP:       return GEN6_STENCILOP_KEEP;
-    case XGL_STENCIL_OP_ZERO:       return GEN6_STENCILOP_ZERO;
-    case XGL_STENCIL_OP_REPLACE:    return GEN6_STENCILOP_REPLACE;
-    case XGL_STENCIL_OP_INC_CLAMP:  return GEN6_STENCILOP_INCRSAT;
-    case XGL_STENCIL_OP_DEC_CLAMP:  return GEN6_STENCILOP_DECRSAT;
-    case XGL_STENCIL_OP_INVERT:     return GEN6_STENCILOP_INVERT;
-    case XGL_STENCIL_OP_INC_WRAP:   return GEN6_STENCILOP_INCR;
-    case XGL_STENCIL_OP_DEC_WRAP:   return GEN6_STENCILOP_DECR;
+    case VK_STENCIL_OP_KEEP:       return GEN6_STENCILOP_KEEP;
+    case VK_STENCIL_OP_ZERO:       return GEN6_STENCILOP_ZERO;
+    case VK_STENCIL_OP_REPLACE:    return GEN6_STENCILOP_REPLACE;
+    case VK_STENCIL_OP_INC_CLAMP:  return GEN6_STENCILOP_INCRSAT;
+    case VK_STENCIL_OP_DEC_CLAMP:  return GEN6_STENCILOP_DECRSAT;
+    case VK_STENCIL_OP_INVERT:     return GEN6_STENCILOP_INVERT;
+    case VK_STENCIL_OP_INC_WRAP:   return GEN6_STENCILOP_INCR;
+    case VK_STENCIL_OP_DEC_WRAP:   return GEN6_STENCILOP_DECR;
     default:
       assert(!"unknown stencil op");
       return GEN6_STENCILOP_KEEP;
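
Note: the translate_* helpers above share one shape: map an API enum to its
GEN6 hardware encoding, assert on anything unexpected, and fall back to a
harmless encoding in release builds (the value goes straight into hardware
state, so garbage is not an option). The same mapping could be table-driven;
a sketch, assuming the VK_BLEND_FUNC_* values are dense from zero:

    /* Illustrative alternative only; the switch form keeps the defensive
     * default for out-of-range input, which a bare table lookup loses. */
    static const int blend_func_to_gen6[] = {
        [VK_BLEND_FUNC_ADD]              = GEN6_BLENDFUNCTION_ADD,
        [VK_BLEND_FUNC_SUBTRACT]         = GEN6_BLENDFUNCTION_SUBTRACT,
        [VK_BLEND_FUNC_REVERSE_SUBTRACT] = GEN6_BLENDFUNCTION_REVERSE_SUBTRACT,
        [VK_BLEND_FUNC_MIN]              = GEN6_BLENDFUNCTION_MIN,
        [VK_BLEND_FUNC_MAX]              = GEN6_BLENDFUNCTION_MAX,
    };
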
@@ -110,22 +110,22 @@
 }
 
 struct intel_pipeline_create_info {
-    XGL_GRAPHICS_PIPELINE_CREATE_INFO   graphics;
-    XGL_PIPELINE_VERTEX_INPUT_CREATE_INFO vi;
-    XGL_PIPELINE_IA_STATE_CREATE_INFO   ia;
-    XGL_PIPELINE_DS_STATE_CREATE_INFO   db;
-    XGL_PIPELINE_CB_STATE_CREATE_INFO   cb;
-    XGL_PIPELINE_RS_STATE_CREATE_INFO   rs;
-    XGL_PIPELINE_TESS_STATE_CREATE_INFO tess;
-    XGL_PIPELINE_MS_STATE_CREATE_INFO   ms;
-    XGL_PIPELINE_VP_STATE_CREATE_INFO   vp;
-    XGL_PIPELINE_SHADER                 vs;
-    XGL_PIPELINE_SHADER                 tcs;
-    XGL_PIPELINE_SHADER                 tes;
-    XGL_PIPELINE_SHADER                 gs;
-    XGL_PIPELINE_SHADER                 fs;
+    VK_GRAPHICS_PIPELINE_CREATE_INFO   graphics;
+    VK_PIPELINE_VERTEX_INPUT_CREATE_INFO vi;
+    VK_PIPELINE_IA_STATE_CREATE_INFO   ia;
+    VK_PIPELINE_DS_STATE_CREATE_INFO   db;
+    VK_PIPELINE_CB_STATE_CREATE_INFO   cb;
+    VK_PIPELINE_RS_STATE_CREATE_INFO   rs;
+    VK_PIPELINE_TESS_STATE_CREATE_INFO tess;
+    VK_PIPELINE_MS_STATE_CREATE_INFO   ms;
+    VK_PIPELINE_VP_STATE_CREATE_INFO   vp;
+    VK_PIPELINE_SHADER                 vs;
+    VK_PIPELINE_SHADER                 tcs;
+    VK_PIPELINE_SHADER                 tes;
+    VK_PIPELINE_SHADER                 gs;
+    VK_PIPELINE_SHADER                 fs;
 
-    XGL_COMPUTE_PIPELINE_CREATE_INFO    compute;
+    VK_COMPUTE_PIPELINE_CREATE_INFO    compute;
 };
 
 /* in S1.3 */
@@ -202,15 +202,15 @@
                                                                 enum intel_dev_meta_shader id)
 {
     struct intel_pipeline_shader *sh;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
-    sh = intel_alloc(dev, sizeof(*sh), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+    sh = intel_alloc(dev, sizeof(*sh), 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!sh)
         return NULL;
     memset(sh, 0, sizeof(*sh));
 
     ret = intel_pipeline_shader_compile_meta(sh, dev->gpu, id);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_free(dev, sh);
         return NULL;
     }
@@ -220,11 +220,11 @@
     case INTEL_DEV_META_VS_COPY_MEM:
     case INTEL_DEV_META_VS_COPY_MEM_UNALIGNED:
         sh->max_threads = intel_gpu_get_max_threads(dev->gpu,
-                XGL_SHADER_STAGE_VERTEX);
+                VK_SHADER_STAGE_VERTEX);
         break;
     default:
         sh->max_threads = intel_gpu_get_max_threads(dev->gpu,
-                XGL_SHADER_STAGE_FRAGMENT);
+                VK_SHADER_STAGE_FRAGMENT);
         break;
     }
 
@@ -238,16 +238,16 @@
     intel_free(dev, sh);
 }
 
-static XGL_RESULT pipeline_build_shader(struct intel_pipeline *pipeline,
+static VK_RESULT pipeline_build_shader(struct intel_pipeline *pipeline,
                                         const struct intel_desc_layout_chain *chain,
-                                        const XGL_PIPELINE_SHADER *sh_info,
+                                        const VK_PIPELINE_SHADER *sh_info,
                                         struct intel_pipeline_shader *sh)
 {
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     ret = intel_pipeline_shader_compile(sh,
             pipeline->dev->gpu, chain, sh_info);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     sh->max_threads =
@@ -260,38 +260,38 @@
 
     pipeline->active_shaders |= 1 << sh_info->stage;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-static XGL_RESULT pipeline_build_shaders(struct intel_pipeline *pipeline,
+static VK_RESULT pipeline_build_shaders(struct intel_pipeline *pipeline,
                                          const struct intel_pipeline_create_info *info)
 {
     const struct intel_desc_layout_chain *chain =
         intel_desc_layout_chain(info->graphics.pSetLayoutChain);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
-    if (ret == XGL_SUCCESS && info->vs.shader) {
+    if (ret == VK_SUCCESS && info->vs.shader) {
         ret = pipeline_build_shader(pipeline, chain,
                 &info->vs, &pipeline->vs);
     }
-    if (ret == XGL_SUCCESS && info->tcs.shader) {
+    if (ret == VK_SUCCESS && info->tcs.shader) {
         ret = pipeline_build_shader(pipeline, chain,
                 &info->tcs, &pipeline->tcs);
     }
-    if (ret == XGL_SUCCESS && info->tes.shader) {
+    if (ret == VK_SUCCESS && info->tes.shader) {
         ret = pipeline_build_shader(pipeline, chain,
                 &info->tes, &pipeline->tes);
     }
-    if (ret == XGL_SUCCESS && info->gs.shader) {
+    if (ret == VK_SUCCESS && info->gs.shader) {
         ret = pipeline_build_shader(pipeline, chain,
                 &info->gs, &pipeline->gs);
     }
-    if (ret == XGL_SUCCESS && info->fs.shader) {
+    if (ret == VK_SUCCESS && info->fs.shader) {
         ret = pipeline_build_shader(pipeline, chain,
                 &info->fs, &pipeline->fs);
     }
 
-    if (ret == XGL_SUCCESS && info->compute.cs.shader) {
+    if (ret == VK_SUCCESS && info->compute.cs.shader) {
         chain = intel_desc_layout_chain(info->compute.setLayoutChain);
         ret = pipeline_build_shader(pipeline, chain,
                 &info->compute.cs, &pipeline->cs);
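
Note: pipeline_build_shaders() gates every stage behind ret == VK_SUCCESS, so
the first compile failure short-circuits the rest, while
pipeline_build_shader() records each built stage as a bit in active_shaders
(1 << stage, matching the SHADER_*_FLAG macros in pipeline.h below). The same
chain written as a data-driven loop, purely illustrative (stage_table is
hypothetical):

    const struct {
        const VK_PIPELINE_SHADER *sh_info;
        struct intel_pipeline_shader *sh;
    } stage_table[] = {
        { &info->vs,  &pipeline->vs  },
        { &info->tcs, &pipeline->tcs },
        { &info->tes, &pipeline->tes },
        { &info->gs,  &pipeline->gs  },
        { &info->fs,  &pipeline->fs  },
    };
    uint32_t i;

    for (i = 0; i < ARRAY_SIZE(stage_table) && ret == VK_SUCCESS; i++) {
        if (stage_table[i].sh_info->shader)
            ret = pipeline_build_shader(pipeline, chain,
                    stage_table[i].sh_info, stage_table[i].sh);
    }
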
@@ -309,52 +309,52 @@
     return ptr;
 }
 
-static XGL_RESULT pipeline_build_ia(struct intel_pipeline *pipeline,
+static VK_RESULT pipeline_build_ia(struct intel_pipeline *pipeline,
                                     const struct intel_pipeline_create_info* info)
 {
     pipeline->topology = info->ia.topology;
     pipeline->disable_vs_cache = info->ia.disableVertexReuse;
 
     switch (info->ia.topology) {
-    case XGL_TOPOLOGY_POINT_LIST:
+    case VK_TOPOLOGY_POINT_LIST:
         pipeline->prim_type = GEN6_3DPRIM_POINTLIST;
         break;
-    case XGL_TOPOLOGY_LINE_LIST:
+    case VK_TOPOLOGY_LINE_LIST:
         pipeline->prim_type = GEN6_3DPRIM_LINELIST;
         break;
-    case XGL_TOPOLOGY_LINE_STRIP:
+    case VK_TOPOLOGY_LINE_STRIP:
         pipeline->prim_type = GEN6_3DPRIM_LINESTRIP;
         break;
-    case XGL_TOPOLOGY_TRIANGLE_LIST:
+    case VK_TOPOLOGY_TRIANGLE_LIST:
         pipeline->prim_type = GEN6_3DPRIM_TRILIST;
         break;
-    case XGL_TOPOLOGY_TRIANGLE_STRIP:
+    case VK_TOPOLOGY_TRIANGLE_STRIP:
         pipeline->prim_type = GEN6_3DPRIM_TRISTRIP;
         break;
-    case XGL_TOPOLOGY_TRIANGLE_FAN:
+    case VK_TOPOLOGY_TRIANGLE_FAN:
         pipeline->prim_type = GEN6_3DPRIM_TRIFAN;
         break;
-    case XGL_TOPOLOGY_LINE_LIST_ADJ:
+    case VK_TOPOLOGY_LINE_LIST_ADJ:
         pipeline->prim_type = GEN6_3DPRIM_LINELIST_ADJ;
         break;
-    case XGL_TOPOLOGY_LINE_STRIP_ADJ:
+    case VK_TOPOLOGY_LINE_STRIP_ADJ:
         pipeline->prim_type = GEN6_3DPRIM_LINESTRIP_ADJ;
         break;
-    case XGL_TOPOLOGY_TRIANGLE_LIST_ADJ:
+    case VK_TOPOLOGY_TRIANGLE_LIST_ADJ:
         pipeline->prim_type = GEN6_3DPRIM_TRILIST_ADJ;
         break;
-    case XGL_TOPOLOGY_TRIANGLE_STRIP_ADJ:
+    case VK_TOPOLOGY_TRIANGLE_STRIP_ADJ:
         pipeline->prim_type = GEN6_3DPRIM_TRISTRIP_ADJ;
         break;
-    case XGL_TOPOLOGY_PATCH:
+    case VK_TOPOLOGY_PATCH:
         if (!info->tess.patchControlPoints ||
             info->tess.patchControlPoints > 32)
-            return XGL_ERROR_BAD_PIPELINE_DATA;
+            return VK_ERROR_BAD_PIPELINE_DATA;
         pipeline->prim_type = GEN7_3DPRIM_PATCHLIST_1 +
             info->tess.patchControlPoints - 1;
         break;
     default:
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
     }
 
     if (info->ia.primitiveRestartEnable) {
@@ -364,20 +364,20 @@
         pipeline->primitive_restart = false;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-static XGL_RESULT pipeline_build_rs_state(struct intel_pipeline *pipeline,
+static VK_RESULT pipeline_build_rs_state(struct intel_pipeline *pipeline,
                                           const struct intel_pipeline_create_info* info)
 {
-    const XGL_PIPELINE_RS_STATE_CREATE_INFO *rs_state = &info->rs;
+    const VK_PIPELINE_RS_STATE_CREATE_INFO *rs_state = &info->rs;
     bool ccw;
 
     pipeline->depthClipEnable = rs_state->depthClipEnable;
     pipeline->rasterizerDiscardEnable = rs_state->rasterizerDiscardEnable;
     pipeline->use_rs_point_size = !rs_state->programPointSize;
 
-    if (rs_state->provokingVertex == XGL_PROVOKING_VERTEX_FIRST) {
+    if (rs_state->provokingVertex == VK_PROVOKING_VERTEX_FIRST) {
         pipeline->provoking_vertex_tri = 0;
         pipeline->provoking_vertex_trifan = 1;
         pipeline->provoking_vertex_line = 0;
@@ -388,24 +388,24 @@
     }
 
     switch (rs_state->fillMode) {
-    case XGL_FILL_POINTS:
+    case VK_FILL_POINTS:
         pipeline->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_POINT |
                               GEN7_SF_DW1_BACKFACE_POINT;
         break;
-    case XGL_FILL_WIREFRAME:
+    case VK_FILL_WIREFRAME:
         pipeline->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_WIREFRAME |
                               GEN7_SF_DW1_BACKFACE_WIREFRAME;
         break;
-    case XGL_FILL_SOLID:
+    case VK_FILL_SOLID:
     default:
         pipeline->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_SOLID |
                               GEN7_SF_DW1_BACKFACE_SOLID;
         break;
     }
 
-    ccw = (rs_state->frontFace == XGL_FRONT_FACE_CCW);
+    ccw = (rs_state->frontFace == VK_FRONT_FACE_CCW);
     /* flip the winding order */
-    if (info->vp.clipOrigin == XGL_COORDINATE_ORIGIN_LOWER_LEFT)
+    if (info->vp.clipOrigin == VK_COORDINATE_ORIGIN_LOWER_LEFT)
         ccw = !ccw;
 
     if (ccw) {
@@ -414,20 +414,20 @@
     }
 
     switch (rs_state->cullMode) {
-    case XGL_CULL_NONE:
+    case VK_CULL_NONE:
     default:
         pipeline->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_NONE;
         pipeline->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_NONE;
         break;
-    case XGL_CULL_FRONT:
+    case VK_CULL_FRONT:
         pipeline->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_FRONT;
         pipeline->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_FRONT;
         break;
-    case XGL_CULL_BACK:
+    case VK_CULL_BACK:
         pipeline->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BACK;
         pipeline->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BACK;
         break;
-    case XGL_CULL_FRONT_AND_BACK:
+    case VK_CULL_FRONT_AND_BACK:
         pipeline->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BOTH;
         pipeline->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BOTH;
         break;
@@ -437,7 +437,7 @@
     if (intel_gpu_gen(pipeline->dev->gpu) == INTEL_GEN(6))
         pipeline->cmd_clip_cull = 0;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
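
Note: the winding flip in pipeline_build_rs_state() compensates for the clip
origin: with VK_COORDINATE_ORIGIN_LOWER_LEFT the Y axis is inverted relative
to the hardware's upper-left convention, so every triangle's apparent winding
reverses and the front-face selection must flip with it. A sketch of the same
decision as a standalone helper (type names here are assumptions, the logic
mirrors the code above):

    /* frontFace == VK_FRONT_FACE_CCW, clipOrigin == UPPER_LEFT -> ccw = true
     * frontFace == VK_FRONT_FACE_CCW, clipOrigin == LOWER_LEFT -> ccw = false */
    static bool front_face_is_ccw(int frontFace, int clipOrigin)
    {
        bool ccw = (frontFace == VK_FRONT_FACE_CCW);
        if (clipOrigin == VK_COORDINATE_ORIGIN_LOWER_LEFT)
            ccw = !ccw;     /* the Y flip reverses apparent winding */
        return ccw;
    }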
 
 static void pipeline_destroy(struct intel_obj *obj)
@@ -471,22 +471,22 @@
     intel_base_destroy(&pipeline->obj.base);
 }
 
-static XGL_RESULT pipeline_get_info(struct intel_base *base, int type,
+static VK_RESULT pipeline_get_info(struct intel_base *base, int type,
                                     size_t *size, void *data)
 {
     struct intel_pipeline *pipeline = intel_pipeline_from_base(base);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (type) {
-    case XGL_INFO_TYPE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
         {
-            XGL_MEMORY_REQUIREMENTS *mem_req = data;
+            VK_MEMORY_REQUIREMENTS *mem_req = data;
 
-            *size = sizeof(XGL_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_MEMORY_REQUIREMENTS);
             if (data) {
                 mem_req->size = pipeline->scratch_size;
                 mem_req->alignment = 1024;
-                mem_req->memType =  XGL_MEMORY_TYPE_OTHER;
+                mem_req->memType = VK_MEMORY_TYPE_OTHER;
             }
         }
         break;
@@ -498,14 +498,14 @@
     return ret;
 }
 
-static XGL_RESULT pipeline_validate(struct intel_pipeline *pipeline)
+static VK_RESULT pipeline_validate(struct intel_pipeline *pipeline)
 {
     /*
      * Validate required elements
      */
     if (!(pipeline->active_shaders & SHADER_VERTEX_FLAG)) {
         // TODO: Log debug message: Vertex Shader required.
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
     }
 
     /*
@@ -515,7 +515,7 @@
     if (((pipeline->active_shaders & SHADER_TESS_CONTROL_FLAG) == 0) !=
          ((pipeline->active_shaders & SHADER_TESS_EVAL_FLAG) == 0) ) {
        // TODO: Log debug message: Both Tess control and Tess eval are required to use tessellation
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
     }
 
     if ((pipeline->active_shaders & SHADER_COMPUTE_FLAG) &&
@@ -523,26 +523,26 @@
                                      SHADER_TESS_EVAL_FLAG | SHADER_GEOMETRY_FLAG |
                                      SHADER_FRAGMENT_FLAG))) {
         // TODO: Log debug message: Can only specify compute shader when doing compute
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
     }
 
     /*
-     * XGL_TOPOLOGY_PATCH primitive topology is only valid for tessellation pipelines.
+     * VK_TOPOLOGY_PATCH primitive topology is only valid for tessellation pipelines.
      * Mismatching primitive topology and tessellation fails graphics pipeline creation.
      */
     if (pipeline->active_shaders & (SHADER_TESS_CONTROL_FLAG | SHADER_TESS_EVAL_FLAG) &&
-        (pipeline->topology != XGL_TOPOLOGY_PATCH)) {
+        (pipeline->topology != VK_TOPOLOGY_PATCH)) {
        // TODO: Log debug message: Invalid topology used with tessellation shader.
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
     }
 
-    if ((pipeline->topology == XGL_TOPOLOGY_PATCH) &&
+    if ((pipeline->topology == VK_TOPOLOGY_PATCH) &&
             (pipeline->active_shaders & ~(SHADER_TESS_CONTROL_FLAG | SHADER_TESS_EVAL_FLAG))) {
        // TODO: Log debug message: Cannot use TOPOLOGY_PATCH on non-tessellation shader.
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
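
Note: pipeline_validate() checks four structural rules against the
active_shaders bitmask: a vertex shader is mandatory, the two tessellation
stages come as a pair, a compute shader excludes all graphics stages, and
VK_TOPOLOGY_PATCH is used exactly when tessellation is. The pairing rule,
restated with explicit booleans (sketch only, equivalent to the
(... == 0) != (... == 0) comparison above):

    bool has_tc = (pipeline->active_shaders & SHADER_TESS_CONTROL_FLAG) != 0;
    bool has_te = (pipeline->active_shaders & SHADER_TESS_EVAL_FLAG) != 0;

    if (has_tc != has_te)   /* one stage without the other is invalid */
        return VK_ERROR_BAD_PIPELINE_DATA;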
 
 static void pipeline_build_urb_alloc_gen6(struct intel_pipeline *pipeline,
@@ -744,7 +744,7 @@
 
     /* VERTEX_ELEMENT_STATE */
     for (i = 0, attrs_processed = 0; attrs_processed < attr_count; i++) {
-        XGL_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION *attr = NULL;
+        VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION *attr = NULL;
 
         /*
          * The compiler will pack the shader references and then
@@ -762,7 +762,7 @@
          */
         for (j = 0; j < info->vi.attributeCount; j++) {
             if (info->vi.pVertexAttributeDescriptions[j].location == i) {
-                attr = (XGL_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION *) &info->vi.pVertexAttributeDescriptions[j];
+                attr = (VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION *) &info->vi.pVertexAttributeDescriptions[j];
                 attrs_processed++;
                 break;
             }
@@ -824,10 +824,10 @@
                                     const struct intel_pipeline_create_info *info)
 {
     switch (info->vp.depthMode) {
-    case XGL_DEPTH_MODE_ZERO_TO_ONE:
+    case VK_DEPTH_MODE_ZERO_TO_ONE:
         pipeline->depth_zero_to_one = true;
         break;
-    case XGL_DEPTH_MODE_NEGATIVE_ONE_TO_ONE:
+    case VK_DEPTH_MODE_NEGATIVE_ONE_TO_ONE:
     default:
         pipeline->depth_zero_to_one = false;
         break;
@@ -888,10 +888,10 @@
           vue_offset << GEN7_SBE_DW1_URB_READ_OFFSET__SHIFT;
 
     switch (info->rs.pointOrigin) {
-    case XGL_COORDINATE_ORIGIN_UPPER_LEFT:
+    case VK_COORDINATE_ORIGIN_UPPER_LEFT:
         body[1] |= GEN7_SBE_DW1_POINT_SPRITE_TEXCOORD_UPPERLEFT;
         break;
-    case XGL_COORDINATE_ORIGIN_LOWER_LEFT:
+    case VK_COORDINATE_ORIGIN_LOWER_LEFT:
         body[1] |= GEN7_SBE_DW1_POINT_SPRITE_TEXCOORD_LOWERLEFT;
         break;
     default:
@@ -949,7 +949,7 @@
         body[2 + i] = hi << GEN8_SBE_SWIZ_HIGH__SHIFT | lo;
     }
 
-    if (info->ia.topology == XGL_TOPOLOGY_POINT_LIST)
+    if (info->ia.topology == VK_TOPOLOGY_POINT_LIST)
         body[10] = fs->point_sprite_enables;
     else
         body[10] = 0;
@@ -1094,7 +1094,7 @@
     uint32_t *dw = pipeline->cmd_cb;
 
     for (i = 0; i < info->cb.attachmentCount; i++) {
-        const XGL_PIPELINE_CB_ATTACHMENT_STATE *att = &info->cb.pAttachments[i];
+        const VK_PIPELINE_CB_ATTACHMENT_STATE *att = &info->cb.pAttachments[i];
         uint32_t dw0, dw1;
 
 
@@ -1120,25 +1120,25 @@
             pipeline->dual_source_blend_enable = icd_pipeline_cb_att_needs_dual_source_blending(att);
         }
 
-        if (info->cb.logicOp != XGL_LOGIC_OP_COPY) {
+        if (info->cb.logicOp != VK_LOGIC_OP_COPY) {
             int logicop;
 
             switch (info->cb.logicOp) {
-            case XGL_LOGIC_OP_CLEAR:            logicop = GEN6_LOGICOP_CLEAR; break;
-            case XGL_LOGIC_OP_AND:              logicop = GEN6_LOGICOP_AND; break;
-            case XGL_LOGIC_OP_AND_REVERSE:      logicop = GEN6_LOGICOP_AND_REVERSE; break;
-            case XGL_LOGIC_OP_AND_INVERTED:     logicop = GEN6_LOGICOP_AND_INVERTED; break;
-            case XGL_LOGIC_OP_NOOP:             logicop = GEN6_LOGICOP_NOOP; break;
-            case XGL_LOGIC_OP_XOR:              logicop = GEN6_LOGICOP_XOR; break;
-            case XGL_LOGIC_OP_OR:               logicop = GEN6_LOGICOP_OR; break;
-            case XGL_LOGIC_OP_NOR:              logicop = GEN6_LOGICOP_NOR; break;
-            case XGL_LOGIC_OP_EQUIV:            logicop = GEN6_LOGICOP_EQUIV; break;
-            case XGL_LOGIC_OP_INVERT:           logicop = GEN6_LOGICOP_INVERT; break;
-            case XGL_LOGIC_OP_OR_REVERSE:       logicop = GEN6_LOGICOP_OR_REVERSE; break;
-            case XGL_LOGIC_OP_COPY_INVERTED:    logicop = GEN6_LOGICOP_COPY_INVERTED; break;
-            case XGL_LOGIC_OP_OR_INVERTED:      logicop = GEN6_LOGICOP_OR_INVERTED; break;
-            case XGL_LOGIC_OP_NAND:             logicop = GEN6_LOGICOP_NAND; break;
-            case XGL_LOGIC_OP_SET:              logicop = GEN6_LOGICOP_SET; break;
+            case VK_LOGIC_OP_CLEAR:            logicop = GEN6_LOGICOP_CLEAR; break;
+            case VK_LOGIC_OP_AND:              logicop = GEN6_LOGICOP_AND; break;
+            case VK_LOGIC_OP_AND_REVERSE:      logicop = GEN6_LOGICOP_AND_REVERSE; break;
+            case VK_LOGIC_OP_AND_INVERTED:     logicop = GEN6_LOGICOP_AND_INVERTED; break;
+            case VK_LOGIC_OP_NOOP:             logicop = GEN6_LOGICOP_NOOP; break;
+            case VK_LOGIC_OP_XOR:              logicop = GEN6_LOGICOP_XOR; break;
+            case VK_LOGIC_OP_OR:               logicop = GEN6_LOGICOP_OR; break;
+            case VK_LOGIC_OP_NOR:              logicop = GEN6_LOGICOP_NOR; break;
+            case VK_LOGIC_OP_EQUIV:            logicop = GEN6_LOGICOP_EQUIV; break;
+            case VK_LOGIC_OP_INVERT:           logicop = GEN6_LOGICOP_INVERT; break;
+            case VK_LOGIC_OP_OR_REVERSE:       logicop = GEN6_LOGICOP_OR_REVERSE; break;
+            case VK_LOGIC_OP_COPY_INVERTED:    logicop = GEN6_LOGICOP_COPY_INVERTED; break;
+            case VK_LOGIC_OP_OR_INVERTED:      logicop = GEN6_LOGICOP_OR_INVERTED; break;
+            case VK_LOGIC_OP_NAND:             logicop = GEN6_LOGICOP_NAND; break;
+            case VK_LOGIC_OP_SET:              logicop = GEN6_LOGICOP_SET; break;
             default:
                 assert(!"unknown logic op");
                 logicop = GEN6_LOGICOP_CLEAR;
@@ -1177,18 +1177,18 @@
 }
 
 
-static XGL_RESULT pipeline_build_all(struct intel_pipeline *pipeline,
+static VK_RESULT pipeline_build_all(struct intel_pipeline *pipeline,
                                      const struct intel_pipeline_create_info *info)
 {
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     ret = pipeline_build_shaders(pipeline, info);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     if (info->vi.bindingCount > ARRAY_SIZE(pipeline->vb) ||
         info->vi.attributeCount > ARRAY_SIZE(pipeline->vb))
-        return XGL_ERROR_BAD_PIPELINE_DATA;
+        return VK_ERROR_BAD_PIPELINE_DATA;
 
     pipeline->vb_count = info->vi.bindingCount;
     memcpy(pipeline->vb, info->vi.pVertexBindingDescriptions,
@@ -1221,10 +1221,10 @@
 
     ret = pipeline_build_ia(pipeline, info);
 
-    if (ret == XGL_SUCCESS)
+    if (ret == VK_SUCCESS)
         ret = pipeline_build_rs_state(pipeline, info);
 
-    if (ret == XGL_SUCCESS) {
+    if (ret == VK_SUCCESS) {
         pipeline->db_format = info->db.format;
         pipeline_build_cb(pipeline, info);
         pipeline->cb_state = info->cb;
@@ -1235,11 +1235,11 @@
 }
 
 struct intel_pipeline_create_info_header {
-    XGL_STRUCTURE_TYPE struct_type;
+    VK_STRUCTURE_TYPE struct_type;
     const struct intel_pipeline_create_info_header *next;
 };
 
-static XGL_RESULT pipeline_create_info_init(struct intel_pipeline_create_info *info,
+static VK_RESULT pipeline_create_info_init(struct intel_pipeline_create_info *info,
                                             const struct intel_pipeline_create_info_header *header)
 {
     memset(info, 0, sizeof(*info));
@@ -1258,78 +1258,78 @@
         void *dst;
 
         switch (header->struct_type) {
-        case XGL_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO:
             size = sizeof(info->graphics);
             dst = &info->graphics;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO:
             size = sizeof(info->vi);
             dst = &info->vi;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO:
             size = sizeof(info->ia);
             dst = &info->ia;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO:
             size = sizeof(info->db);
             dst = &info->db;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO:
             size = sizeof(info->cb);
             dst = &info->cb;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO:
             size = sizeof(info->rs);
             dst = &info->rs;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO:
             size = sizeof(info->tess);
             dst = &info->tess;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO:
             size = sizeof(info->ms);
             dst = &info->ms;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO:
             size = sizeof(info->vp);
             dst = &info->vp;
             break;
-        case XGL_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
             {
-                const XGL_PIPELINE_SHADER *shader =
-                    (const XGL_PIPELINE_SHADER *) (header + 1);
+                const VK_PIPELINE_SHADER *shader =
+                    (const VK_PIPELINE_SHADER *) (header + 1);
 
                 src = (const void *) shader;
                 size = sizeof(*shader);
 
                 switch (shader->stage) {
-                case XGL_SHADER_STAGE_VERTEX:
+                case VK_SHADER_STAGE_VERTEX:
                     dst = &info->vs;
                     break;
-                case XGL_SHADER_STAGE_TESS_CONTROL:
+                case VK_SHADER_STAGE_TESS_CONTROL:
                     dst = &info->tcs;
                     break;
-                case XGL_SHADER_STAGE_TESS_EVALUATION:
+                case VK_SHADER_STAGE_TESS_EVALUATION:
                     dst = &info->tes;
                     break;
-                case XGL_SHADER_STAGE_GEOMETRY:
+                case VK_SHADER_STAGE_GEOMETRY:
                     dst = &info->gs;
                     break;
-                case XGL_SHADER_STAGE_FRAGMENT:
+                case VK_SHADER_STAGE_FRAGMENT:
                     dst = &info->fs;
                     break;
                 default:
-                    return XGL_ERROR_BAD_PIPELINE_DATA;
+                    return VK_ERROR_BAD_PIPELINE_DATA;
                     break;
                 }
             }
             break;
-        case XGL_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO:
+        case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO:
             size = sizeof(info->compute);
             dst = &info->compute;
             break;
         default:
-            return XGL_ERROR_BAD_PIPELINE_DATA;
+            return VK_ERROR_BAD_PIPELINE_DATA;
             break;
         }
 
@@ -1337,49 +1337,49 @@
         header = header->next;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
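
Note: pipeline_create_info_init() can walk the application's create-info list
generically because every VK_*_CREATE_INFO opens with the same
(struct_type, next) header; each node is then copied into the flattened
intel_pipeline_create_info slot for its struct type. A sketch of a minimal
chain an application might pass (field names sType/pNext as used earlier in
this patch; all other members omitted):

    VK_PIPELINE_IA_STATE_CREATE_INFO ia_info = {
        .sType    = VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO,
        .pNext    = NULL,
        .topology = VK_TOPOLOGY_TRIANGLE_LIST,
    };

    VK_GRAPHICS_PIPELINE_CREATE_INFO gfx_info = {
        .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
        .pNext = &ia_info,      /* the parser above walks this list */
    };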
 
-static XGL_RESULT graphics_pipeline_create(struct intel_dev *dev,
-                                           const XGL_GRAPHICS_PIPELINE_CREATE_INFO *info_,
+static VK_RESULT graphics_pipeline_create(struct intel_dev *dev,
+                                           const VK_GRAPHICS_PIPELINE_CREATE_INFO *info_,
                                            struct intel_pipeline **pipeline_ret)
 {
     struct intel_pipeline_create_info info;
     struct intel_pipeline *pipeline;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     ret = pipeline_create_info_init(&info,
             (const struct intel_pipeline_create_info_header *) info_);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     pipeline = (struct intel_pipeline *) intel_base_create(&dev->base.handle,
             sizeof(*pipeline), dev->base.dbg,
-            XGL_DBG_OBJECT_GRAPHICS_PIPELINE, info_, 0);
+            VK_DBG_OBJECT_GRAPHICS_PIPELINE, info_, 0);
     if (!pipeline)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     pipeline->dev = dev;
     pipeline->obj.base.get_info = pipeline_get_info;
     pipeline->obj.destroy = pipeline_destroy;
 
     ret = pipeline_build_all(pipeline, &info);
-    if (ret == XGL_SUCCESS)
+    if (ret == VK_SUCCESS)
         ret = pipeline_validate(pipeline);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         pipeline_destroy(&pipeline->obj);
         return ret;
     }
 
     *pipeline_ret = pipeline;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateGraphicsPipeline(
-    XGL_DEVICE                                  device,
-    const XGL_GRAPHICS_PIPELINE_CREATE_INFO*    pCreateInfo,
-    XGL_PIPELINE*                               pPipeline)
+ICD_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(
+    VK_DEVICE                                   device,
+    const VK_GRAPHICS_PIPELINE_CREATE_INFO*     pCreateInfo,
+    VK_PIPELINE*                                pPipeline)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -1387,11 +1387,11 @@
             (struct intel_pipeline **) pPipeline);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateGraphicsPipelineDerivative(
-    XGL_DEVICE                                  device,
-    const XGL_GRAPHICS_PIPELINE_CREATE_INFO*    pCreateInfo,
-    XGL_PIPELINE                                basePipeline,
-    XGL_PIPELINE*                               pPipeline)
+ICD_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
+    VK_DEVICE                                   device,
+    const VK_GRAPHICS_PIPELINE_CREATE_INFO*     pCreateInfo,
+    VK_PIPELINE                                 basePipeline,
+    VK_PIPELINE*                                pPipeline)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -1401,37 +1401,37 @@
             (struct intel_pipeline **) pPipeline);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateComputePipeline(
-    XGL_DEVICE                                  device,
-    const XGL_COMPUTE_PIPELINE_CREATE_INFO*     pCreateInfo,
-    XGL_PIPELINE*                               pPipeline)
+ICD_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(
+    VK_DEVICE                                   device,
+    const VK_COMPUTE_PIPELINE_CREATE_INFO*      pCreateInfo,
+    VK_PIPELINE*                                pPipeline)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglStorePipeline(
-    XGL_PIPELINE                                pipeline,
+ICD_EXPORT VK_RESULT VKAPI vkStorePipeline(
+    VK_PIPELINE                                 pipeline,
     size_t*                                     pDataSize,
     void*                                       pData)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglLoadPipeline(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkLoadPipeline(
+    VK_DEVICE                                   device,
     size_t                                    dataSize,
     const void*                                 pData,
-    XGL_PIPELINE*                               pPipeline)
+    VK_PIPELINE*                                pPipeline)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglLoadPipelineDerivative(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkLoadPipelineDerivative(
+    VK_DEVICE                                   device,
     size_t                                      dataSize,
     const void*                                 pData,
-    XGL_PIPELINE                                basePipeline,
-    XGL_PIPELINE*                               pPipeline)
+    VK_PIPELINE                                 basePipeline,
+    VK_PIPELINE*                                pPipeline)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
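
Note: with the rename in place a caller drives creation end to end through
the vk* symbols; since there is no dedicated pipeline-destroy entry point at
this stage, teardown goes through the generic vkDestroyObject() from obj.c
above. Usage sketch (device and gfx_info assumed set up as in the previous
sketch):

    VK_PIPELINE pipeline;
    VK_RESULT ret = vkCreateGraphicsPipeline(device, &gfx_info, &pipeline);

    if (ret == VK_SUCCESS) {
        /* ... bind the pipeline and draw ... */
        vkDestroyObject(pipeline);
    }
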
diff --git a/icd/intel/pipeline.h b/icd/intel/pipeline.h
index bcfb065..102d466 100644
--- a/icd/intel/pipeline.h
+++ b/icd/intel/pipeline.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -93,12 +93,12 @@
     uint32_t slot_count;
 };
 
-#define SHADER_VERTEX_FLAG            (1 << XGL_SHADER_STAGE_VERTEX)
-#define SHADER_TESS_CONTROL_FLAG      (1 << XGL_SHADER_STAGE_TESS_CONTROL)
-#define SHADER_TESS_EVAL_FLAG         (1 << XGL_SHADER_STAGE_TESS_EVALUATION)
-#define SHADER_GEOMETRY_FLAG          (1 << XGL_SHADER_STAGE_GEOMETRY)
-#define SHADER_FRAGMENT_FLAG          (1 << XGL_SHADER_STAGE_FRAGMENT)
-#define SHADER_COMPUTE_FLAG           (1 << XGL_SHADER_STAGE_COMPUTE)
+#define SHADER_VERTEX_FLAG            (1 << VK_SHADER_STAGE_VERTEX)
+#define SHADER_TESS_CONTROL_FLAG      (1 << VK_SHADER_STAGE_TESS_CONTROL)
+#define SHADER_TESS_EVAL_FLAG         (1 << VK_SHADER_STAGE_TESS_EVALUATION)
+#define SHADER_GEOMETRY_FLAG          (1 << VK_SHADER_STAGE_GEOMETRY)
+#define SHADER_FRAGMENT_FLAG          (1 << VK_SHADER_STAGE_FRAGMENT)
+#define SHADER_COMPUTE_FLAG           (1 << VK_SHADER_STAGE_COMPUTE)
 
 struct intel_pipeline_shader {
     /* this is not an intel_obj */
@@ -110,7 +110,7 @@
      * must grab everything we need from shader object as that
      * can go away after the pipeline is created
      */
-    XGL_FLAGS uses;
+    VK_FLAGS uses;
     uint64_t inputs_read;
     uint64_t outputs_written;
     uint32_t outputs_offset;
@@ -132,10 +132,10 @@
     /* If present, where does the SIMD16 kernel start? */
     uint32_t offset_16;
 
-    XGL_FLAGS barycentric_interps;
-    XGL_FLAGS point_sprite_enables;
+    VK_FLAGS barycentric_interps;
+    VK_FLAGS point_sprite_enables;
 
-    XGL_GPU_SIZE per_thread_scratch_size;
+    VK_GPU_SIZE per_thread_scratch_size;
 
     enum intel_computed_depth_mode computed_depth_mode;
 
@@ -143,7 +143,7 @@
 
     /* these are set up by the driver */
     uint32_t max_threads;
-    XGL_GPU_SIZE scratch_offset;
+    VK_GPU_SIZE scratch_offset;
 };
 
 /*
@@ -173,11 +173,11 @@
 
     struct intel_dev *dev;
 
-    XGL_VERTEX_INPUT_BINDING_DESCRIPTION vb[INTEL_MAX_VERTEX_BINDING_COUNT];
+    VK_VERTEX_INPUT_BINDING_DESCRIPTION vb[INTEL_MAX_VERTEX_BINDING_COUNT];
     uint32_t vb_count;
 
-    /* XGL_PIPELINE_IA_STATE_CREATE_INFO */
-    XGL_PRIMITIVE_TOPOLOGY topology;
+    /* VK_PIPELINE_IA_STATE_CREATE_INFO */
+    VK_PRIMITIVE_TOPOLOGY topology;
     int prim_type;
     bool disable_vs_cache;
     bool primitive_restart;
@@ -187,20 +187,20 @@
     int provoking_vertex_trifan;
     int provoking_vertex_line;
 
-    // TODO: This should probably be Intel HW state, not XGL state.
+    // TODO: This should probably be Intel HW state, not VK state.
     /* Depth Buffer format */
-    XGL_FORMAT db_format;
+    VK_FORMAT db_format;
 
     bool depth_zero_to_one;
 
-    XGL_PIPELINE_CB_STATE_CREATE_INFO cb_state;
+    VK_PIPELINE_CB_STATE_CREATE_INFO cb_state;
 
-    // XGL_PIPELINE_RS_STATE_CREATE_INFO rs_state;
+    // VK_PIPELINE_RS_STATE_CREATE_INFO rs_state;
     bool depthClipEnable;
     bool rasterizerDiscardEnable;
     bool use_rs_point_size;
 
-    XGL_PIPELINE_TESS_STATE_CREATE_INFO tess_state;
+    VK_PIPELINE_TESS_STATE_CREATE_INFO tess_state;
 
     uint32_t active_shaders;
     struct intel_pipeline_shader vs;
@@ -209,7 +209,7 @@
     struct intel_pipeline_shader gs;
     struct intel_pipeline_shader fs;
     struct intel_pipeline_shader cs;
-    XGL_GPU_SIZE scratch_size;
+    VK_GPU_SIZE scratch_size;
 
     uint32_t wa_flags;
 
@@ -221,7 +221,7 @@
     /* The following are only partial HW commands that will need
      * more processing before sending to the HW
      */
-    // XGL_PIPELINE_DS_STATE_CREATE_INFO ds_state
+    // VK_PIPELINE_DS_STATE_CREATE_INFO ds_state
     bool stencilTestEnable;
     uint32_t cmd_depth_stencil;
     uint32_t cmd_depth_test;
@@ -236,7 +236,7 @@
     uint32_t cmd_3dstate_sbe[14];
 };
 
-static inline struct intel_pipeline *intel_pipeline(XGL_PIPELINE pipeline)
+static inline struct intel_pipeline *intel_pipeline(VK_PIPELINE pipeline)
 {
     return (struct intel_pipeline *) pipeline;
 }
diff --git a/icd/intel/query.c b/icd/intel/query.c
index 2c4700d..93e5de6 100644
--- a/icd/intel/query.c
+++ b/icd/intel/query.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -36,23 +36,23 @@
     intel_query_destroy(query);
 }
 
-static XGL_RESULT query_get_info(struct intel_base *base, int type,
+static VK_RESULT query_get_info(struct intel_base *base, int type,
                                  size_t *size, void *data)
 {
     struct intel_query *query = intel_query_from_base(base);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     switch (type) {
-    case XGL_INFO_TYPE_MEMORY_REQUIREMENTS:
+    case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
         {
-            XGL_MEMORY_REQUIREMENTS *mem_req = data;
+            VK_MEMORY_REQUIREMENTS *mem_req = data;
 
-            *size = sizeof(XGL_MEMORY_REQUIREMENTS);
+            *size = sizeof(VK_MEMORY_REQUIREMENTS);
             if (data == NULL)
                 return ret;
             mem_req->size = query->slot_stride * query->slot_count;
             mem_req->alignment = 64;
-            mem_req->memType =  XGL_MEMORY_TYPE_OTHER;
+            mem_req->memType = VK_MEMORY_TYPE_OTHER;
         }
         break;
     default:
@@ -63,17 +63,17 @@
     return ret;
 }
 
-XGL_RESULT intel_query_create(struct intel_dev *dev,
-                              const XGL_QUERY_POOL_CREATE_INFO *info,
+VK_RESULT intel_query_create(struct intel_dev *dev,
+                              const VK_QUERY_POOL_CREATE_INFO *info,
                               struct intel_query **query_ret)
 {
     struct intel_query *query;
 
     query = (struct intel_query *) intel_base_create(&dev->base.handle,
-            sizeof(*query), dev->base.dbg, XGL_DBG_OBJECT_QUERY_POOL,
+            sizeof(*query), dev->base.dbg, VK_DBG_OBJECT_QUERY_POOL,
             info, 0);
     if (!query)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     query->type = info->queryType;
     query->slot_count = info->slots;
@@ -84,12 +84,12 @@
      * compare the differences to get the query results.
      */
     switch (info->queryType) {
-    case XGL_QUERY_OCCLUSION:
+    case VK_QUERY_OCCLUSION:
         query->slot_stride = u_align(sizeof(uint64_t) * 2, 64);
         break;
-    case XGL_QUERY_PIPELINE_STATISTICS:
+    case VK_QUERY_PIPELINE_STATISTICS:
         query->slot_stride =
-            u_align(sizeof(XGL_PIPELINE_STATISTICS_DATA) * 2, 64);
+            u_align(sizeof(VK_PIPELINE_STATISTICS_DATA) * 2, 64);
         break;
     default:
         break;
@@ -97,7 +97,7 @@
 
     if (!query->slot_stride) {
         intel_query_destroy(query);
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
     }
 
     query->obj.base.get_info = query_get_info;
@@ -105,7 +105,7 @@
 
     *query_ret = query;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_query_destroy(struct intel_query *query)
@@ -131,7 +131,7 @@
 static void
 query_process_pipeline_statistics(const struct intel_query *query,
                                   uint32_t count, const uint8_t *raw,
-                                  XGL_PIPELINE_STATISTICS_DATA *results)
+                                  VK_PIPELINE_STATISTICS_DATA *results)
 {
     const uint32_t num_regs = sizeof(results[0]) / sizeof(uint64_t);
     uint32_t i, j;
@@ -148,29 +148,29 @@
     }
 }
 
-XGL_RESULT intel_query_get_results(struct intel_query *query,
+VK_RESULT intel_query_get_results(struct intel_query *query,
                                    uint32_t slot_start, uint32_t slot_count,
                                    void *results)
 {
     const uint8_t *ptr;
 
     if (!query->obj.mem)
-        return XGL_ERROR_MEMORY_NOT_BOUND;
+        return VK_ERROR_MEMORY_NOT_BOUND;
 
     if (intel_mem_is_busy(query->obj.mem))
-        return XGL_NOT_READY;
+        return VK_NOT_READY;
 
     ptr = (const uint8_t *) intel_mem_map_sync(query->obj.mem, false);
     if (!ptr)
-        return XGL_ERROR_MEMORY_MAP_FAILED;
+        return VK_ERROR_MEMORY_MAP_FAILED;
 
     ptr += query->obj.offset + query->slot_stride * slot_start;
 
     switch (query->type) {
-    case XGL_QUERY_OCCLUSION:
+    case VK_QUERY_OCCLUSION:
         query_process_occlusion(query, slot_count, ptr, results);
         break;
-    case XGL_QUERY_PIPELINE_STATISTICS:
+    case VK_QUERY_PIPELINE_STATISTICS:
         query_process_pipeline_statistics(query, slot_count, ptr, results);
         break;
     default:
@@ -180,13 +180,13 @@
 
     intel_mem_unmap(query->obj.mem);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateQueryPool(
-    XGL_DEVICE                                  device,
-    const XGL_QUERY_POOL_CREATE_INFO*           pCreateInfo,
-    XGL_QUERY_POOL*                             pQueryPool)
+ICD_EXPORT VK_RESULT VKAPI vkCreateQueryPool(
+    VK_DEVICE                                   device,
+    const VK_QUERY_POOL_CREATE_INFO*            pCreateInfo,
+    VK_QUERY_POOL*                              pQueryPool)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -194,8 +194,8 @@
             (struct intel_query **) pQueryPool);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetQueryPoolResults(
-    XGL_QUERY_POOL                              queryPool,
+ICD_EXPORT VK_RESULT VKAPI vkGetQueryPoolResults(
+    VK_QUERY_POOL                               queryPool,
     uint32_t                                    startQuery,
     uint32_t                                    queryCount,
     size_t*                                     pDataSize,
@@ -204,19 +204,19 @@
     struct intel_query *query = intel_query(queryPool);
 
     switch (query->type) {
-    case XGL_QUERY_OCCLUSION:
+    case VK_QUERY_OCCLUSION:
         *pDataSize = sizeof(uint64_t) * queryCount;
         break;
-    case XGL_QUERY_PIPELINE_STATISTICS:
-        *pDataSize = sizeof(XGL_PIPELINE_STATISTICS_DATA) * queryCount;
+    case VK_QUERY_PIPELINE_STATISTICS:
+        *pDataSize = sizeof(VK_PIPELINE_STATISTICS_DATA) * queryCount;
         break;
     default:
-        return XGL_ERROR_INVALID_HANDLE;
+        return VK_ERROR_INVALID_HANDLE;
         break;
     }
 
     if (pData)
         return intel_query_get_results(query, startQuery, queryCount, pData);
     else
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 }
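
A hedged usage sketch against the two entry points above; queryType and
slots are the only VK_QUERY_POOL_CREATE_INFO fields this diff shows, so any
other field is an assumption.  The busy-poll and result-size behavior come
straight from intel_query_get_results() and vkGetQueryPoolResults():

    /* Create a two-slot occlusion pool and read it back.  Each slot holds
     * a begin/end counter pair; for occlusion the stride works out to
     * u_align(2 * sizeof(uint64_t), 64) = 64 bytes.
     */
    VK_QUERY_POOL_CREATE_INFO info = {0};
    VK_QUERY_POOL pool;
    uint64_t results[2];
    size_t size = sizeof(results);
    VK_RESULT ret;

    info.queryType = VK_QUERY_OCCLUSION;
    info.slots = 2;

    ret = vkCreateQueryPool(device, &info, &pool);
    /* ... bind memory, then record and submit work that uses the pool ... */
    ret = vkGetQueryPoolResults(pool, 0, 2, &size, results);
    /* VK_NOT_READY while the backing BO is busy; poll or wait on a fence. */
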
diff --git a/icd/intel/query.h b/icd/intel/query.h
index dc94101..4e0f7c7 100644
--- a/icd/intel/query.h
+++ b/icd/intel/query.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -34,12 +34,12 @@
 struct intel_query {
     struct intel_obj obj;
 
-    XGL_QUERY_TYPE type;
+    VK_QUERY_TYPE type;
     uint32_t slot_stride;
     uint32_t slot_count;
 };
 
-static inline struct intel_query *intel_query(XGL_QUERY_POOL pool)
+static inline struct intel_query *intel_query(VK_QUERY_POOL pool)
 {
     return (struct intel_query *) pool;
 }
@@ -54,12 +54,12 @@
     return intel_query_from_base(&obj->base);
 }
 
-XGL_RESULT intel_query_create(struct intel_dev *dev,
-                              const XGL_QUERY_POOL_CREATE_INFO *info,
+VK_RESULT intel_query_create(struct intel_dev *dev,
+                              const VK_QUERY_POOL_CREATE_INFO *info,
                               struct intel_query **query_ret);
 void intel_query_destroy(struct intel_query *query);
 
-XGL_RESULT intel_query_get_results(struct intel_query *query,
+VK_RESULT intel_query_get_results(struct intel_query *query,
                                    uint32_t slot_start, uint32_t slot_count,
                                    void *results);
 
diff --git a/icd/intel/queue.c b/icd/intel/queue.c
index 892c40c..fe67fa6 100644
--- a/icd/intel/queue.c
+++ b/icd/intel/queue.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -39,15 +39,15 @@
 {
     intel_cmd_decode(cmd, true);
 
-    intel_dev_log(queue->dev, XGL_DBG_MSG_ERROR,
-            XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
+    intel_dev_log(queue->dev, VK_DBG_MSG_ERROR,
+            VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
             "GPU hanged with %d/%d active/pending command buffers lost",
             active_lost, pending_lost);
 }
 
-static XGL_RESULT queue_submit_bo(struct intel_queue *queue,
+static VK_RESULT queue_submit_bo(struct intel_queue *queue,
                                   struct intel_bo *bo,
-                                  XGL_GPU_SIZE used)
+                                  VK_GPU_SIZE used)
 {
     struct intel_winsys *winsys = queue->dev->winsys;
     int err;
@@ -57,11 +57,11 @@
     else
         err = intel_winsys_submit_bo(winsys, queue->ring, bo, used, 0);
 
-    return (err) ? XGL_ERROR_UNKNOWN : XGL_SUCCESS;
+    return (err) ? VK_ERROR_UNKNOWN : VK_SUCCESS;
 }
 
 static struct intel_bo *queue_create_bo(struct intel_queue *queue,
-                                        XGL_GPU_SIZE size,
+                                        VK_GPU_SIZE size,
                                         const void *cmd,
                                         size_t cmd_len)
 {
@@ -88,7 +88,7 @@
     return bo;
 }
 
-static XGL_RESULT queue_select_pipeline(struct intel_queue *queue,
+static VK_RESULT queue_select_pipeline(struct intel_queue *queue,
                                         int pipeline_select)
 {
     uint32_t pipeline_select_cmd[] = {
@@ -96,11 +96,11 @@
         GEN6_MI_CMD(MI_BATCH_BUFFER_END),
     };
     struct intel_bo *bo;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     if (queue->ring != INTEL_RING_RENDER ||
         queue->last_pipeline_select == pipeline_select)
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 
     switch (pipeline_select) {
     case GEN6_PIPELINE_SELECT_DW0_SELECT_3D:
@@ -110,7 +110,7 @@
         bo = queue->select_compute_bo;
         break;
     default:
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
         break;
     }
 
@@ -119,7 +119,7 @@
         bo = queue_create_bo(queue, sizeof(pipeline_select_cmd),
                 pipeline_select_cmd, sizeof(pipeline_select_cmd));
         if (!bo)
-            return XGL_ERROR_OUT_OF_GPU_MEMORY;
+            return VK_ERROR_OUT_OF_GPU_MEMORY;
 
         switch (pipeline_select) {
         case GEN6_PIPELINE_SELECT_DW0_SELECT_3D:
@@ -134,13 +134,13 @@
     }
 
     ret = queue_submit_bo(queue, bo, sizeof(pipeline_select_cmd));
-    if (ret == XGL_SUCCESS)
+    if (ret == VK_SUCCESS)
         queue->last_pipeline_select = pipeline_select;
 
     return ret;
 }
 
-static XGL_RESULT queue_init_hw_and_atomic_bo(struct intel_queue *queue)
+static VK_RESULT queue_init_hw_and_atomic_bo(struct intel_queue *queue)
 {
     const uint32_t ctx_init_cmd[] = {
         /* STATE_SIP */
@@ -156,24 +156,24 @@
         GEN6_MI_CMD(MI_NOOP),
     };
     struct intel_bo *bo;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     if (queue->ring != INTEL_RING_RENDER) {
         queue->last_pipeline_select = -1;
         queue->atomic_bo = queue_create_bo(queue,
                 sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
                 NULL, 0);
-        return (queue->atomic_bo) ? XGL_SUCCESS : XGL_ERROR_OUT_OF_GPU_MEMORY;
+        return (queue->atomic_bo) ? VK_SUCCESS : VK_ERROR_OUT_OF_GPU_MEMORY;
     }
 
     bo = queue_create_bo(queue,
             sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
             ctx_init_cmd, sizeof(ctx_init_cmd));
     if (!bo)
-        return XGL_ERROR_OUT_OF_GPU_MEMORY;
+        return VK_ERROR_OUT_OF_GPU_MEMORY;
 
     ret = queue_submit_bo(queue, bo, sizeof(ctx_init_cmd));
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_bo_unref(bo);
         return ret;
     }
@@ -182,15 +182,15 @@
     /* reuse */
     queue->atomic_bo = bo;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-static XGL_RESULT queue_submit_cmd_prepare(struct intel_queue *queue,
+static VK_RESULT queue_submit_cmd_prepare(struct intel_queue *queue,
                                            struct intel_cmd *cmd)
 {
-    if (unlikely(cmd->result != XGL_SUCCESS)) {
-        intel_dev_log(cmd->dev, XGL_DBG_MSG_ERROR,
-                XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
+    if (unlikely(cmd->result != VK_SUCCESS)) {
+        intel_dev_log(cmd->dev, VK_DBG_MSG_ERROR,
+                VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
                 "invalid command buffer submitted");
         return cmd->result;
     }
@@ -198,16 +198,16 @@
     return queue_select_pipeline(queue, cmd->pipeline_select);
 }
 
-static XGL_RESULT queue_submit_cmd_debug(struct intel_queue *queue,
+static VK_RESULT queue_submit_cmd_debug(struct intel_queue *queue,
                                          struct intel_cmd *cmd)
 {
     uint32_t active[2], pending[2];
     struct intel_bo *bo;
-    XGL_GPU_SIZE used;
-    XGL_RESULT ret;
+    VK_GPU_SIZE used;
+    VK_RESULT ret;
 
     ret = queue_submit_cmd_prepare(queue, cmd);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     if (intel_debug & INTEL_DEBUG_HANG) {
@@ -217,7 +217,7 @@
 
     bo = intel_cmd_get_batch(cmd, &used);
     ret = queue_submit_bo(queue, bo, used);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     if (intel_debug & INTEL_DEBUG_HANG) {
@@ -234,18 +234,18 @@
     if (intel_debug & INTEL_DEBUG_BATCH)
         intel_cmd_decode(cmd, false);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-static XGL_RESULT queue_submit_cmd(struct intel_queue *queue,
+static VK_RESULT queue_submit_cmd(struct intel_queue *queue,
                                    struct intel_cmd *cmd)
 {
     struct intel_bo *bo;
-    XGL_GPU_SIZE used;
-    XGL_RESULT ret;
+    VK_GPU_SIZE used;
+    VK_RESULT ret;
 
     ret = queue_submit_cmd_prepare(queue, cmd);
-    if (ret == XGL_SUCCESS) {
+    if (ret == VK_SUCCESS) {
         bo = intel_cmd_get_batch(cmd, &used);
         ret = queue_submit_bo(queue, bo, used);
     }
@@ -253,48 +253,48 @@
     return ret;
 }
 
-XGL_RESULT intel_queue_create(struct intel_dev *dev,
+VK_RESULT intel_queue_create(struct intel_dev *dev,
                               enum intel_gpu_engine_type engine,
                               struct intel_queue **queue_ret)
 {
     struct intel_queue *queue;
     enum intel_ring_type ring;
-    XGL_FENCE_CREATE_INFO fence_info;
-    XGL_RESULT ret;
+    VK_FENCE_CREATE_INFO fence_info;
+    VK_RESULT ret;
 
     switch (engine) {
     case INTEL_GPU_ENGINE_3D:
         ring = INTEL_RING_RENDER;
         break;
     default:
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
         break;
     }
 
     queue = (struct intel_queue *) intel_base_create(&dev->base.handle,
-            sizeof(*queue), dev->base.dbg, XGL_DBG_OBJECT_QUEUE, NULL, 0);
+            sizeof(*queue), dev->base.dbg, VK_DBG_OBJECT_QUEUE, NULL, 0);
     if (!queue)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     queue->dev = dev;
     queue->ring = ring;
 
-    if (queue_init_hw_and_atomic_bo(queue) != XGL_SUCCESS) {
+    if (queue_init_hw_and_atomic_bo(queue) != VK_SUCCESS) {
         intel_queue_destroy(queue);
-        return XGL_ERROR_INITIALIZATION_FAILED;
+        return VK_ERROR_INITIALIZATION_FAILED;
     }
 
     memset(&fence_info, 0, sizeof(fence_info));
-    fence_info.sType = XGL_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+    fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
     ret = intel_fence_create(dev, &fence_info, &queue->fence);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_queue_destroy(queue);
         return ret;
     }
 
     *queue_ret = queue;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_queue_destroy(struct intel_queue *queue)
@@ -309,53 +309,53 @@
     intel_base_destroy(&queue->base);
 }
 
-XGL_RESULT intel_queue_wait(struct intel_queue *queue, int64_t timeout)
+VK_RESULT intel_queue_wait(struct intel_queue *queue, int64_t timeout)
 {
-    /* return XGL_SUCCESS instead of XGL_ERROR_UNAVAILABLE */
+    /* return VK_SUCCESS instead of VK_ERROR_UNAVAILABLE */
     if (!queue->fence->seqno_bo)
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 
     return intel_fence_wait(queue->fence, timeout);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglQueueAddMemReference(
-    XGL_QUEUE                                   queue,
-    XGL_GPU_MEMORY                              mem)
+ICD_EXPORT VK_RESULT VKAPI vkQueueAddMemReference(
+    VK_QUEUE                                    queue,
+    VK_GPU_MEMORY                               mem)
 {
     /*
      * The winsys maintains the list of memory references.  These are ignored
      * until we move away from the winsys.
      */
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglQueueRemoveMemReference(
-    XGL_QUEUE                                   queue,
-    XGL_GPU_MEMORY                              mem)
+ICD_EXPORT VK_RESULT VKAPI vkQueueRemoveMemReference(
+    VK_QUEUE                                    queue,
+    VK_GPU_MEMORY                               mem)
 {
     /*
      * The winsys maintains the list of memory references.  These are ignored
      * until we move away from the winsys.
      */
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglQueueWaitIdle(
-    XGL_QUEUE                                   queue_)
+ICD_EXPORT VK_RESULT VKAPI vkQueueWaitIdle(
+    VK_QUEUE                                    queue_)
 {
     struct intel_queue *queue = intel_queue(queue_);
 
     return intel_queue_wait(queue, -1);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglQueueSubmit(
-    XGL_QUEUE                                   queue_,
+ICD_EXPORT VK_RESULT VKAPI vkQueueSubmit(
+    VK_QUEUE                                    queue_,
     uint32_t                                    cmdBufferCount,
-    const XGL_CMD_BUFFER*                       pCmdBuffers,
-    XGL_FENCE                                   fence_)
+    const VK_CMD_BUFFER*                        pCmdBuffers,
+    VK_FENCE                                    fence_)
 {
     struct intel_queue *queue = intel_queue(queue_);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
     struct intel_cmd *last_cmd;
     uint32_t i;
 
@@ -363,14 +363,14 @@
         for (i = 0; i < cmdBufferCount; i++) {
             struct intel_cmd *cmd = intel_cmd(pCmdBuffers[i]);
             ret = queue_submit_cmd_debug(queue, cmd);
-            if (ret != XGL_SUCCESS)
+            if (ret != VK_SUCCESS)
                 break;
         }
     } else {
         for (i = 0; i < cmdBufferCount; i++) {
             struct intel_cmd *cmd = intel_cmd(pCmdBuffers[i]);
             ret = queue_submit_cmd(queue, cmd);
-            if (ret != XGL_SUCCESS)
+            if (ret != VK_SUCCESS)
                 break;
         }
     }
@@ -381,11 +381,11 @@
 
     last_cmd = intel_cmd(pCmdBuffers[i - 1]);
 
-    if (ret == XGL_SUCCESS) {
+    if (ret == VK_SUCCESS) {
         intel_fence_set_seqno(queue->fence,
                 intel_bo_ref(intel_cmd_get_batch(last_cmd, NULL)));
 
-        if (fence_ != XGL_NULL_HANDLE) {
+        if (fence_ != VK_NULL_HANDLE) {
             struct intel_fence *fence = intel_fence(fence_);
             intel_fence_copy(fence, queue->fence);
         }
@@ -400,18 +400,18 @@
     return ret;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglOpenSharedSemaphore(
-    XGL_DEVICE                                  device,
-    const XGL_SEMAPHORE_OPEN_INFO*              pOpenInfo,
-    XGL_SEMAPHORE*                              pSemaphore)
+ICD_EXPORT VK_RESULT VKAPI vkOpenSharedSemaphore(
+    VK_DEVICE                                   device,
+    const VK_SEMAPHORE_OPEN_INFO*               pOpenInfo,
+    VK_SEMAPHORE*                               pSemaphore)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateSemaphore(
-    XGL_DEVICE                                  device,
-    const XGL_SEMAPHORE_CREATE_INFO*            pCreateInfo,
-    XGL_SEMAPHORE*                              pSemaphore)
+ICD_EXPORT VK_RESULT VKAPI vkCreateSemaphore(
+    VK_DEVICE                                   device,
+    const VK_SEMAPHORE_CREATE_INFO*             pCreateInfo,
+    VK_SEMAPHORE*                               pSemaphore)
 {
     /*
      * We want to find an unused semaphore register and initialize it.  Signal
@@ -421,19 +421,19 @@
      *
      * XXX However, MI_SEMAPHORE_MBOX does not seem to have the flexibility.
      */
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglQueueSignalSemaphore(
-    XGL_QUEUE                                   queue,
-    XGL_SEMAPHORE                               semaphore)
+ICD_EXPORT VK_RESULT VKAPI vkQueueSignalSemaphore(
+    VK_QUEUE                                    queue,
+    VK_SEMAPHORE                                semaphore)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglQueueWaitSemaphore(
-    XGL_QUEUE                                   queue,
-    XGL_SEMAPHORE                               semaphore)
+ICD_EXPORT VK_RESULT VKAPI vkQueueWaitSemaphore(
+    VK_QUEUE                                    queue,
+    VK_SEMAPHORE                                semaphore)
 {
-    return XGL_ERROR_UNAVAILABLE;
+    return VK_ERROR_UNAVAILABLE;
 }
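
From the caller's side the submit path above composes as follows (queue,
cmd_bufs, and fence are placeholders).  Note that all four semaphore entry
points return VK_ERROR_UNAVAILABLE, so fences and vkQueueWaitIdle() are the
only completion mechanisms this ICD actually implements:

    /* Submit a batch of command buffers, then block until the ring idles.
     * Each buffer goes through queue_submit_cmd_prepare(), which may also
     * flush a PIPELINE_SELECT via queue_select_pipeline().
     */
    VK_RESULT ret = vkQueueSubmit(queue, cmd_buf_count, cmd_bufs, fence);
    if (ret == VK_SUCCESS)
        ret = vkQueueWaitIdle(queue);    /* intel_queue_wait(queue, -1) */
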
diff --git a/icd/intel/queue.h b/icd/intel/queue.h
index 1f07740..3ca6e78 100644
--- a/icd/intel/queue.h
+++ b/icd/intel/queue.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -53,16 +53,16 @@
     struct intel_fence *fence;
 };
 
-static inline struct intel_queue *intel_queue(XGL_QUEUE queue)
+static inline struct intel_queue *intel_queue(VK_QUEUE queue)
 {
     return (struct intel_queue *) queue;
 }
 
-XGL_RESULT intel_queue_create(struct intel_dev *dev,
+VK_RESULT intel_queue_create(struct intel_dev *dev,
                               enum intel_gpu_engine_type engine,
                               struct intel_queue **queue_ret);
 void intel_queue_destroy(struct intel_queue *queue);
 
-XGL_RESULT intel_queue_wait(struct intel_queue *queue, int64_t timeout);
+VK_RESULT intel_queue_wait(struct intel_queue *queue, int64_t timeout);
 
 #endif /* QUEUE_H */
diff --git a/icd/intel/sampler.c b/icd/intel/sampler.c
index 34d08e3..57ebdad 100644
--- a/icd/intel/sampler.c
+++ b/icd/intel/sampler.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -32,77 +32,77 @@
 /**
  * Translate a pipe texture filter to the matching hardware mapfilter.
  */
-static int translate_tex_filter(XGL_TEX_FILTER filter)
+static int translate_tex_filter(VK_TEX_FILTER filter)
 {
    switch (filter) {
-   case XGL_TEX_FILTER_NEAREST: return GEN6_MAPFILTER_NEAREST;
-   case XGL_TEX_FILTER_LINEAR:  return GEN6_MAPFILTER_LINEAR;
+   case VK_TEX_FILTER_NEAREST: return GEN6_MAPFILTER_NEAREST;
+   case VK_TEX_FILTER_LINEAR:  return GEN6_MAPFILTER_LINEAR;
    default:
       assert(!"unknown tex filter");
       return GEN6_MAPFILTER_NEAREST;
    }
 }
 
-static int translate_tex_mipmap_mode(XGL_TEX_MIPMAP_MODE mode)
+static int translate_tex_mipmap_mode(VK_TEX_MIPMAP_MODE mode)
 {
    switch (mode) {
-   case XGL_TEX_MIPMAP_NEAREST: return GEN6_MIPFILTER_NEAREST;
-   case XGL_TEX_MIPMAP_LINEAR:  return GEN6_MIPFILTER_LINEAR;
-   case XGL_TEX_MIPMAP_BASE:    return GEN6_MIPFILTER_NONE;
+   case VK_TEX_MIPMAP_NEAREST: return GEN6_MIPFILTER_NEAREST;
+   case VK_TEX_MIPMAP_LINEAR:  return GEN6_MIPFILTER_LINEAR;
+   case VK_TEX_MIPMAP_BASE:    return GEN6_MIPFILTER_NONE;
    default:
       assert(!"unknown tex mipmap mode");
       return GEN6_MIPFILTER_NONE;
    }
 }
 
-static int translate_tex_addr(XGL_TEX_ADDRESS addr)
+static int translate_tex_addr(VK_TEX_ADDRESS addr)
 {
    switch (addr) {
-   case XGL_TEX_ADDRESS_WRAP:         return GEN6_TEXCOORDMODE_WRAP;
-   case XGL_TEX_ADDRESS_MIRROR:       return GEN6_TEXCOORDMODE_MIRROR;
-   case XGL_TEX_ADDRESS_CLAMP:        return GEN6_TEXCOORDMODE_CLAMP;
-   case XGL_TEX_ADDRESS_MIRROR_ONCE:  return GEN6_TEXCOORDMODE_MIRROR_ONCE;
-   case XGL_TEX_ADDRESS_CLAMP_BORDER: return GEN6_TEXCOORDMODE_CLAMP_BORDER;
+   case VK_TEX_ADDRESS_WRAP:         return GEN6_TEXCOORDMODE_WRAP;
+   case VK_TEX_ADDRESS_MIRROR:       return GEN6_TEXCOORDMODE_MIRROR;
+   case VK_TEX_ADDRESS_CLAMP:        return GEN6_TEXCOORDMODE_CLAMP;
+   case VK_TEX_ADDRESS_MIRROR_ONCE:  return GEN6_TEXCOORDMODE_MIRROR_ONCE;
+   case VK_TEX_ADDRESS_CLAMP_BORDER: return GEN6_TEXCOORDMODE_CLAMP_BORDER;
    default:
       assert(!"unknown tex address");
       return GEN6_TEXCOORDMODE_WRAP;
    }
 }
 
-static int translate_compare_func(XGL_COMPARE_FUNC func)
+static int translate_compare_func(VK_COMPARE_FUNC func)
 {
     switch (func) {
-    case XGL_COMPARE_NEVER:         return GEN6_COMPAREFUNCTION_NEVER;
-    case XGL_COMPARE_LESS:          return GEN6_COMPAREFUNCTION_LESS;
-    case XGL_COMPARE_EQUAL:         return GEN6_COMPAREFUNCTION_EQUAL;
-    case XGL_COMPARE_LESS_EQUAL:    return GEN6_COMPAREFUNCTION_LEQUAL;
-    case XGL_COMPARE_GREATER:       return GEN6_COMPAREFUNCTION_GREATER;
-    case XGL_COMPARE_NOT_EQUAL:     return GEN6_COMPAREFUNCTION_NOTEQUAL;
-    case XGL_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
-    case XGL_COMPARE_ALWAYS:        return GEN6_COMPAREFUNCTION_ALWAYS;
+    case VK_COMPARE_NEVER:         return GEN6_COMPAREFUNCTION_NEVER;
+    case VK_COMPARE_LESS:          return GEN6_COMPAREFUNCTION_LESS;
+    case VK_COMPARE_EQUAL:         return GEN6_COMPAREFUNCTION_EQUAL;
+    case VK_COMPARE_LESS_EQUAL:    return GEN6_COMPAREFUNCTION_LEQUAL;
+    case VK_COMPARE_GREATER:       return GEN6_COMPAREFUNCTION_GREATER;
+    case VK_COMPARE_NOT_EQUAL:     return GEN6_COMPAREFUNCTION_NOTEQUAL;
+    case VK_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
+    case VK_COMPARE_ALWAYS:        return GEN6_COMPAREFUNCTION_ALWAYS;
     default:
       assert(!"unknown compare_func");
       return GEN6_COMPAREFUNCTION_NEVER;
     }
 }
 
-static void translate_border_color(XGL_BORDER_COLOR_TYPE type, float rgba[4])
+static void translate_border_color(VK_BORDER_COLOR_TYPE type, float rgba[4])
 {
     switch (type) {
-    case XGL_BORDER_COLOR_OPAQUE_WHITE:
+    case VK_BORDER_COLOR_OPAQUE_WHITE:
         rgba[0] = 1.0;
         rgba[1] = 1.0;
         rgba[2] = 1.0;
         rgba[3] = 1.0;
         break;
-    case XGL_BORDER_COLOR_TRANSPARENT_BLACK:
+    case VK_BORDER_COLOR_TRANSPARENT_BLACK:
     default:
         rgba[0] = 0.0;
         rgba[1] = 0.0;
         rgba[2] = 0.0;
         rgba[3] = 0.0;
         break;
-    case XGL_BORDER_COLOR_OPAQUE_BLACK:
+    case VK_BORDER_COLOR_OPAQUE_BLACK:
         rgba[0] = 0.0;
         rgba[1] = 0.0;
         rgba[2] = 0.0;
@@ -177,7 +177,7 @@
 static void
 sampler_init(struct intel_sampler *sampler,
              const struct intel_gpu *gpu,
-             const XGL_SAMPLER_CREATE_INFO *info)
+             const VK_SAMPLER_CREATE_INFO *info)
 {
    int mip_filter, min_filter, mag_filter, max_aniso;
    int lod_bias, max_lod, min_lod;
@@ -253,7 +253,7 @@
     * To achieve our goal, we just need to set MinLod to zero and set
     * MagFilter to MinFilter when mipmapping is disabled.
     */
-   if (info->mipMode == XGL_TEX_MIPMAP_BASE && min_lod) {
+   if (info->mipMode == VK_TEX_MIPMAP_BASE && min_lod) {
       min_lod = 0;
       mag_filter = min_filter;
    }
@@ -361,16 +361,16 @@
     intel_sampler_destroy(sampler);
 }
 
-XGL_RESULT intel_sampler_create(struct intel_dev *dev,
-                                const XGL_SAMPLER_CREATE_INFO *info,
+VK_RESULT intel_sampler_create(struct intel_dev *dev,
+                                const VK_SAMPLER_CREATE_INFO *info,
                                 struct intel_sampler **sampler_ret)
 {
     struct intel_sampler *sampler;
 
     sampler = (struct intel_sampler *) intel_base_create(&dev->base.handle,
-            sizeof(*sampler), dev->base.dbg, XGL_DBG_OBJECT_SAMPLER, info, 0);
+            sizeof(*sampler), dev->base.dbg, VK_DBG_OBJECT_SAMPLER, info, 0);
     if (!sampler)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     sampler->obj.destroy = sampler_destroy;
 
@@ -378,7 +378,7 @@
 
     *sampler_ret = sampler;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_sampler_destroy(struct intel_sampler *sampler)
@@ -386,10 +386,10 @@
     intel_base_destroy(&sampler->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateSampler(
-    XGL_DEVICE                                  device,
-    const XGL_SAMPLER_CREATE_INFO*              pCreateInfo,
-    XGL_SAMPLER*                                pSampler)
+ICD_EXPORT VK_RESULT VKAPI vkCreateSampler(
+    VK_DEVICE                                   device,
+    const VK_SAMPLER_CREATE_INFO*               pCreateInfo,
+    VK_SAMPLER*                                 pSampler)
 {
     struct intel_dev *dev = intel_dev(device);
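
A short usage sketch for vkCreateSampler() above; mipMode is the only
VK_SAMPLER_CREATE_INFO field visible in this diff, so the remaining fields
are left zeroed and are assumptions.  VK_TEX_MIPMAP_BASE exercises the
MinLod/MagFilter workaround described in sampler_init():

    /* Create a sampler with mipmapping disabled; sampler_init() then
     * forces MinLod to 0 and samples with MinFilter for both LOD signs.
     */
    VK_SAMPLER_CREATE_INFO info = {0};
    VK_SAMPLER sampler;
    VK_RESULT ret;

    info.mipMode = VK_TEX_MIPMAP_BASE;
    ret = vkCreateSampler(device, &info, &sampler);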
 
diff --git a/icd/intel/sampler.h b/icd/intel/sampler.h
index f01283e..4b77843 100644
--- a/icd/intel/sampler.h
+++ b/icd/intel/sampler.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -41,7 +41,7 @@
     uint32_t cmd[15];
 };
 
-static inline struct intel_sampler *intel_sampler(XGL_SAMPLER sampler)
+static inline struct intel_sampler *intel_sampler(VK_SAMPLER sampler)
 {
     return (struct intel_sampler *) sampler;
 }
@@ -51,8 +51,8 @@
     return (struct intel_sampler *) obj;
 }
 
-XGL_RESULT intel_sampler_create(struct intel_dev *dev,
-                                const XGL_SAMPLER_CREATE_INFO *info,
+VK_RESULT intel_sampler_create(struct intel_dev *dev,
+                                const VK_SAMPLER_CREATE_INFO *info,
                                 struct intel_sampler **sampler_ret);
 void intel_sampler_destroy(struct intel_sampler *sampler);
 
diff --git a/icd/intel/shader.c b/icd/intel/shader.c
index 232fb31..c8070c3 100644
--- a/icd/intel/shader.c
+++ b/icd/intel/shader.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -39,8 +39,8 @@
     intel_base_destroy(&sh->obj.base);
 }
 
-static XGL_RESULT shader_create(struct intel_dev *dev,
-                                const XGL_SHADER_CREATE_INFO *info,
+static VK_RESULT shader_create(struct intel_dev *dev,
+                                const VK_SHADER_CREATE_INFO *info,
                                 struct intel_shader **sh_ret)
 {
     const struct icd_spv_header *spv =
@@ -48,32 +48,32 @@
     struct intel_shader *sh;
 
     sh = (struct intel_shader *) intel_base_create(&dev->base.handle,
-            sizeof(*sh), dev->base.dbg, XGL_DBG_OBJECT_SHADER, info, 0);
+            sizeof(*sh), dev->base.dbg, VK_DBG_OBJECT_SHADER, info, 0);
     if (!sh)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     if (info->codeSize < sizeof(*spv))
-        return XGL_ERROR_INVALID_MEMORY_SIZE;
+        return VK_ERROR_INVALID_MEMORY_SIZE;
     if (spv->magic != ICD_SPV_MAGIC)
-        return XGL_ERROR_BAD_SHADER_CODE;
+        return VK_ERROR_BAD_SHADER_CODE;
 
     sh->ir = shader_create_ir(dev->gpu, info->pCode, info->codeSize);
     if (!sh->ir) {
         shader_destroy(&sh->obj);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
     sh->obj.destroy = shader_destroy;
 
     *sh_ret = sh;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateShader(
-        XGL_DEVICE                                  device,
-        const XGL_SHADER_CREATE_INFO*               pCreateInfo,
-        XGL_SHADER*                                 pShader)
+ICD_EXPORT VK_RESULT VKAPI vkCreateShader(
+        VK_DEVICE                                   device,
+        const VK_SHADER_CREATE_INFO*                pCreateInfo,
+        VK_SHADER*                                  pShader)
 {
     struct intel_dev *dev = intel_dev(device);
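
shader_create() above gates on an icd_spv_header, so the blob handed to
vkCreateShader() must begin with ICD_SPV_MAGIC.  A hedged sketch
(spirv_blob and spirv_size are caller-supplied placeholders):

    /* Feed an ICD-header-prefixed SPIR-V blob to vkCreateShader. */
    VK_SHADER_CREATE_INFO info = {0};
    VK_SHADER shader;
    VK_RESULT ret;

    info.pCode = spirv_blob;        /* starts with struct icd_spv_header */
    info.codeSize = spirv_size;     /* must be >= sizeof(icd_spv_header) */
    ret = vkCreateShader(device, &info, &shader);
    /* VK_ERROR_BAD_SHADER_CODE if the header magic does not match */

Note that the codeSize and magic checks in shader_create() return without
calling shader_destroy(), so those two error paths leak the partially
created object; only the IR-creation failure path cleans up.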
 
diff --git a/icd/intel/shader.h b/icd/intel/shader.h
index d243b32..382f25c 100644
--- a/icd/intel/shader.h
+++ b/icd/intel/shader.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -40,7 +40,7 @@
     struct intel_ir *ir;
 };
 
-static inline struct intel_shader *intel_shader(XGL_SHADER shader)
+static inline struct intel_shader *intel_shader(VK_SHADER shader)
 {
     return (struct intel_shader *) shader;
 }
diff --git a/icd/intel/state.c b/icd/intel/state.c
index 17c3526..105220a 100644
--- a/icd/intel/state.c
+++ b/icd/intel/state.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -89,10 +89,10 @@
    *max_gby = (float) (center_y + half_len);
 }
 
-static XGL_RESULT
+static VK_RESULT
 viewport_state_alloc_cmd(struct intel_dynamic_vp *state,
                          const struct intel_gpu *gpu,
-                         const XGL_DYNAMIC_VP_STATE_CREATE_INFO *info)
+                         const VK_DYNAMIC_VP_STATE_CREATE_INFO *info)
 {
     INTEL_GPU_ASSERT(gpu, 6, 7.5);
 
@@ -118,28 +118,28 @@
     state->cmd_len += 2 * info->viewportAndScissorCount;
 
     state->cmd = intel_alloc(state, sizeof(uint32_t) * state->cmd_len,
-            0, XGL_SYSTEM_ALLOC_INTERNAL);
+            0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!state->cmd)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-static XGL_RESULT
+static VK_RESULT
 viewport_state_init(struct intel_dynamic_vp *state,
                     const struct intel_gpu *gpu,
-                    const XGL_DYNAMIC_VP_STATE_CREATE_INFO *info)
+                    const VK_DYNAMIC_VP_STATE_CREATE_INFO *info)
 {
     const uint32_t sf_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 8;
     const uint32_t clip_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 4;
     uint32_t *sf_viewport, *clip_viewport, *cc_viewport, *scissor_rect;
     uint32_t i;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     INTEL_GPU_ASSERT(gpu, 6, 7.5);
 
     ret = viewport_state_alloc_cmd(state, gpu, info);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     sf_viewport = state->cmd;
@@ -148,7 +148,7 @@
     scissor_rect = state->cmd + state->cmd_scissor_rect_pos;
 
     for (i = 0; i < info->viewportAndScissorCount; i++) {
-        const XGL_VIEWPORT *viewport = &info->pViewports[i];
+        const VK_VIEWPORT *viewport = &info->pViewports[i];
         uint32_t *dw = NULL;
         float translate[3], scale[3];
         int min_gbx, max_gbx, min_gby, max_gby;
@@ -191,7 +191,7 @@
     }
 
     for (i = 0; i < info->viewportAndScissorCount; i++) {
-        const XGL_RECT *scissor = &info->pScissors[i];
+        const VK_RECT *scissor = &info->pScissors[i];
         /* SCISSOR_RECT */
         int16_t max_x, max_y;
         uint32_t *dw = NULL;
@@ -211,7 +211,7 @@
         scissor_rect += 2;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void viewport_state_destroy(struct intel_obj *obj)
@@ -221,30 +221,30 @@
     intel_viewport_state_destroy(state);
 }
 
-XGL_RESULT intel_viewport_state_create(struct intel_dev *dev,
-                                       const XGL_DYNAMIC_VP_STATE_CREATE_INFO *info,
+VK_RESULT intel_viewport_state_create(struct intel_dev *dev,
+                                       const VK_DYNAMIC_VP_STATE_CREATE_INFO *info,
                                        struct intel_dynamic_vp **state_ret)
 {
     struct intel_dynamic_vp *state;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     state = (struct intel_dynamic_vp *) intel_base_create(&dev->base.handle,
-            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_VIEWPORT_STATE,
+            sizeof(*state), dev->base.dbg, VK_DBG_OBJECT_VIEWPORT_STATE,
             info, 0);
     if (!state)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     state->obj.destroy = viewport_state_destroy;
 
     ret = viewport_state_init(state, dev->gpu, info);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_viewport_state_destroy(state);
         return ret;
     }
 
     *state_ret = state;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_viewport_state_destroy(struct intel_dynamic_vp *state)
@@ -260,24 +260,24 @@
     intel_raster_state_destroy(state);
 }
 
-XGL_RESULT intel_raster_state_create(struct intel_dev *dev,
-                                     const XGL_DYNAMIC_RS_STATE_CREATE_INFO *info,
+VK_RESULT intel_raster_state_create(struct intel_dev *dev,
+                                     const VK_DYNAMIC_RS_STATE_CREATE_INFO *info,
                                      struct intel_dynamic_rs **state_ret)
 {
     struct intel_dynamic_rs *state;
 
     state = (struct intel_dynamic_rs *) intel_base_create(&dev->base.handle,
-            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_RASTER_STATE,
+            sizeof(*state), dev->base.dbg, VK_DBG_OBJECT_RASTER_STATE,
             info, 0);
     if (!state)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     state->obj.destroy = raster_state_destroy;
     state->rs_info = *info;
 
     *state_ret = state;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_raster_state_destroy(struct intel_dynamic_rs *state)
@@ -292,24 +292,24 @@
     intel_blend_state_destroy(state);
 }
 
-XGL_RESULT intel_blend_state_create(struct intel_dev *dev,
-                                    const XGL_DYNAMIC_CB_STATE_CREATE_INFO *info,
+VK_RESULT intel_blend_state_create(struct intel_dev *dev,
+                                    const VK_DYNAMIC_CB_STATE_CREATE_INFO *info,
                                     struct intel_dynamic_cb **state_ret)
 {
     struct intel_dynamic_cb *state;
 
     state = (struct intel_dynamic_cb *) intel_base_create(&dev->base.handle,
-            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_COLOR_BLEND_STATE,
+            sizeof(*state), dev->base.dbg, VK_DBG_OBJECT_COLOR_BLEND_STATE,
             info, 0);
     if (!state)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     state->obj.destroy = blend_state_destroy;
     state->cb_info = *info;
 
     *state_ret = state;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_blend_state_destroy(struct intel_dynamic_cb *state)
@@ -324,17 +324,17 @@
     intel_ds_state_destroy(state);
 }
 
-XGL_RESULT intel_ds_state_create(struct intel_dev *dev,
-                                 const XGL_DYNAMIC_DS_STATE_CREATE_INFO *info,
+VK_RESULT intel_ds_state_create(struct intel_dev *dev,
+                                 const VK_DYNAMIC_DS_STATE_CREATE_INFO *info,
                                  struct intel_dynamic_ds **state_ret)
 {
     struct intel_dynamic_ds *state;
 
     state = (struct intel_dynamic_ds *) intel_base_create(&dev->base.handle,
-            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_STATE,
+            sizeof(*state), dev->base.dbg, VK_DBG_OBJECT_DEPTH_STENCIL_STATE,
             info, 0);
     if (!state)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     state->obj.destroy = ds_state_destroy;
 
@@ -357,7 +357,7 @@
 
     *state_ret = state;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_ds_state_destroy(struct intel_dynamic_ds *state)
@@ -365,10 +365,10 @@
     intel_base_destroy(&state->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDynamicViewportState(
-    XGL_DEVICE                                  device,
-    const XGL_DYNAMIC_VP_STATE_CREATE_INFO*     pCreateInfo,
-    XGL_DYNAMIC_VP_STATE_OBJECT*                pState)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(
+    VK_DEVICE                                   device,
+    const VK_DYNAMIC_VP_STATE_CREATE_INFO*      pCreateInfo,
+    VK_DYNAMIC_VP_STATE_OBJECT*                 pState)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -376,10 +376,10 @@
             (struct intel_dynamic_vp **) pState);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDynamicRasterState(
-    XGL_DEVICE                                  device,
-    const XGL_DYNAMIC_RS_STATE_CREATE_INFO*     pCreateInfo,
-    XGL_DYNAMIC_RS_STATE_OBJECT*                pState)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(
+    VK_DEVICE                                   device,
+    const VK_DYNAMIC_RS_STATE_CREATE_INFO*      pCreateInfo,
+    VK_DYNAMIC_RS_STATE_OBJECT*                 pState)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -387,10 +387,10 @@
             (struct intel_dynamic_rs **) pState);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDynamicColorBlendState(
-    XGL_DEVICE                                  device,
-    const XGL_DYNAMIC_CB_STATE_CREATE_INFO*     pCreateInfo,
-    XGL_DYNAMIC_CB_STATE_OBJECT*                pState)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(
+    VK_DEVICE                                   device,
+    const VK_DYNAMIC_CB_STATE_CREATE_INFO*      pCreateInfo,
+    VK_DYNAMIC_CB_STATE_OBJECT*                 pState)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -398,10 +398,10 @@
             (struct intel_dynamic_cb **) pState);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDynamicDepthStencilState(
-    XGL_DEVICE                                  device,
-    const XGL_DYNAMIC_DS_STATE_CREATE_INFO*     pCreateInfo,
-    XGL_DYNAMIC_DS_STATE_OBJECT*                pState)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(
+    VK_DEVICE                                   device,
+    const VK_DYNAMIC_DS_STATE_CREATE_INFO*      pCreateInfo,
+    VK_DYNAMIC_DS_STATE_OBJECT*                 pState)
 {
     struct intel_dev *dev = intel_dev(device);
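
The four dynamic-state constructors above share one shape; here is a hedged
sketch for the viewport variant, using only the three
VK_DYNAMIC_VP_STATE_CREATE_INFO fields viewport_state_init() reads.  The
VK_VIEWPORT and VK_RECT contents are zeroed because their fields are not
visible in this diff:

    /* One viewport/scissor pair; the driver builds SF_VIEWPORT,
     * CLIP_VIEWPORT, CC_VIEWPORT, and SCISSOR_RECT entries per pair.
     */
    VK_VIEWPORT viewports[1] = {0};
    VK_RECT scissors[1] = {0};
    VK_DYNAMIC_VP_STATE_CREATE_INFO info = {0};
    VK_DYNAMIC_VP_STATE_OBJECT state;
    VK_RESULT ret;

    info.viewportAndScissorCount = 1;
    info.pViewports = viewports;
    info.pScissors = scissors;
    ret = vkCreateDynamicViewportState(device, &info, &state);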
 
diff --git a/icd/intel/state.h b/icd/intel/state.h
index 67e3410..f14d251 100644
--- a/icd/intel/state.h
+++ b/icd/intel/state.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -47,20 +47,20 @@
 
 struct intel_dynamic_rs {
     struct intel_obj obj;
-    XGL_DYNAMIC_RS_STATE_CREATE_INFO rs_info;
+    VK_DYNAMIC_RS_STATE_CREATE_INFO rs_info;
 };
 
 struct intel_dynamic_cb {
     struct intel_obj obj;
-    XGL_DYNAMIC_CB_STATE_CREATE_INFO cb_info;
+    VK_DYNAMIC_CB_STATE_CREATE_INFO cb_info;
 };
 
 struct intel_dynamic_ds {
     struct intel_obj obj;
-    XGL_DYNAMIC_DS_STATE_CREATE_INFO ds_info;
+    VK_DYNAMIC_DS_STATE_CREATE_INFO ds_info;
 };
 
-static inline struct intel_dynamic_vp *intel_dynamic_vp(XGL_DYNAMIC_VP_STATE_OBJECT state)
+static inline struct intel_dynamic_vp *intel_dynamic_vp(VK_DYNAMIC_VP_STATE_OBJECT state)
 {
     return (struct intel_dynamic_vp *) state;
 }
@@ -70,7 +70,7 @@
     return (struct intel_dynamic_vp *) obj;
 }
 
-static inline struct intel_dynamic_rs *intel_dynamic_rs(XGL_DYNAMIC_RS_STATE_OBJECT state)
+static inline struct intel_dynamic_rs *intel_dynamic_rs(VK_DYNAMIC_RS_STATE_OBJECT state)
 {
     return (struct intel_dynamic_rs *) state;
 }
@@ -80,7 +80,7 @@
     return (struct intel_dynamic_rs *) obj;
 }
 
-static inline struct intel_dynamic_cb *intel_dynamic_cb(XGL_DYNAMIC_CB_STATE_OBJECT state)
+static inline struct intel_dynamic_cb *intel_dynamic_cb(VK_DYNAMIC_CB_STATE_OBJECT state)
 {
     return (struct intel_dynamic_cb *) state;
 }
@@ -90,7 +90,7 @@
     return (struct intel_dynamic_cb *) obj;
 }
 
-static inline struct intel_dynamic_ds *intel_dynamic_ds(XGL_DYNAMIC_DS_STATE_OBJECT state)
+static inline struct intel_dynamic_ds *intel_dynamic_ds(VK_DYNAMIC_DS_STATE_OBJECT state)
 {
     return (struct intel_dynamic_ds *) state;
 }
@@ -100,23 +100,23 @@
     return (struct intel_dynamic_ds *) obj;
 }
 
-XGL_RESULT intel_viewport_state_create(struct intel_dev *dev,
-                                       const XGL_DYNAMIC_VP_STATE_CREATE_INFO *info,
+VK_RESULT intel_viewport_state_create(struct intel_dev *dev,
+                                       const VK_DYNAMIC_VP_STATE_CREATE_INFO *info,
                                        struct intel_dynamic_vp **state_ret);
 void intel_viewport_state_destroy(struct intel_dynamic_vp *state);
 
-XGL_RESULT intel_raster_state_create(struct intel_dev *dev,
-                                     const XGL_DYNAMIC_RS_STATE_CREATE_INFO *info,
+VK_RESULT intel_raster_state_create(struct intel_dev *dev,
+                                     const VK_DYNAMIC_RS_STATE_CREATE_INFO *info,
                                      struct intel_dynamic_rs **state_ret);
 void intel_raster_state_destroy(struct intel_dynamic_rs *state);
 
-XGL_RESULT intel_blend_state_create(struct intel_dev *dev,
-                                    const XGL_DYNAMIC_CB_STATE_CREATE_INFO *info,
+VK_RESULT intel_blend_state_create(struct intel_dev *dev,
+                                    const VK_DYNAMIC_CB_STATE_CREATE_INFO *info,
                                     struct intel_dynamic_cb **state_ret);
 void intel_blend_state_destroy(struct intel_dynamic_cb *state);
 
-XGL_RESULT intel_ds_state_create(struct intel_dev *dev,
-                                 const XGL_DYNAMIC_DS_STATE_CREATE_INFO *info,
+VK_RESULT intel_ds_state_create(struct intel_dev *dev,
+                                 const VK_DYNAMIC_DS_STATE_CREATE_INFO *info,
                                  struct intel_dynamic_ds **state_ret);
 void intel_ds_state_destroy(struct intel_dynamic_ds *state);
 
diff --git a/icd/intel/view.c b/icd/intel/view.c
index 53e17c0..04ceb83 100644
--- a/icd/intel/view.c
+++ b/icd/intel/view.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -87,7 +87,7 @@
 static void surface_state_buf_gen7(const struct intel_gpu *gpu,
                                    unsigned offset, unsigned size,
                                    unsigned struct_size,
-                                   XGL_FORMAT elem_format,
+                                   VK_FORMAT elem_format,
                                    bool is_rt, bool render_cache_rw,
                                    uint32_t dw[8])
 {
@@ -200,49 +200,49 @@
    }
 }
 
-static int img_type_to_view_type(XGL_IMAGE_TYPE type)
+static int img_type_to_view_type(VK_IMAGE_TYPE type)
 {
     switch (type) {
-    case XGL_IMAGE_1D:   return XGL_IMAGE_VIEW_1D;
-    case XGL_IMAGE_2D:   return XGL_IMAGE_VIEW_2D;
-    case XGL_IMAGE_3D:   return XGL_IMAGE_VIEW_3D;
-    default: assert(!"unknown img type"); return XGL_IMAGE_VIEW_1D;
+    case VK_IMAGE_1D:   return VK_IMAGE_VIEW_1D;
+    case VK_IMAGE_2D:   return VK_IMAGE_VIEW_2D;
+    case VK_IMAGE_3D:   return VK_IMAGE_VIEW_3D;
+    default: assert(!"unknown img type"); return VK_IMAGE_VIEW_1D;
     }
 }
 
-static int view_type_to_surface_type(XGL_IMAGE_VIEW_TYPE type)
+static int view_type_to_surface_type(VK_IMAGE_VIEW_TYPE type)
 {
     switch (type) {
-    case XGL_IMAGE_VIEW_1D:   return GEN6_SURFTYPE_1D;
-    case XGL_IMAGE_VIEW_2D:   return GEN6_SURFTYPE_2D;
-    case XGL_IMAGE_VIEW_3D:   return GEN6_SURFTYPE_3D;
-    case XGL_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
+    case VK_IMAGE_VIEW_1D:   return GEN6_SURFTYPE_1D;
+    case VK_IMAGE_VIEW_2D:   return GEN6_SURFTYPE_2D;
+    case VK_IMAGE_VIEW_3D:   return GEN6_SURFTYPE_3D;
+    case VK_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
     default: assert(!"unknown view type"); return GEN6_SURFTYPE_NULL;
     }
 }
 
-static int channel_swizzle_to_scs(XGL_CHANNEL_SWIZZLE swizzle)
+static int channel_swizzle_to_scs(VK_CHANNEL_SWIZZLE swizzle)
 {
     switch (swizzle) {
-    case XGL_CHANNEL_SWIZZLE_ZERO:  return GEN75_SCS_ZERO;
-    case XGL_CHANNEL_SWIZZLE_ONE:   return GEN75_SCS_ONE;
-    case XGL_CHANNEL_SWIZZLE_R:     return GEN75_SCS_RED;
-    case XGL_CHANNEL_SWIZZLE_G:     return GEN75_SCS_GREEN;
-    case XGL_CHANNEL_SWIZZLE_B:     return GEN75_SCS_BLUE;
-    case XGL_CHANNEL_SWIZZLE_A:     return GEN75_SCS_ALPHA;
+    case VK_CHANNEL_SWIZZLE_ZERO:  return GEN75_SCS_ZERO;
+    case VK_CHANNEL_SWIZZLE_ONE:   return GEN75_SCS_ONE;
+    case VK_CHANNEL_SWIZZLE_R:     return GEN75_SCS_RED;
+    case VK_CHANNEL_SWIZZLE_G:     return GEN75_SCS_GREEN;
+    case VK_CHANNEL_SWIZZLE_B:     return GEN75_SCS_BLUE;
+    case VK_CHANNEL_SWIZZLE_A:     return GEN75_SCS_ALPHA;
     default: assert(!"unknown swizzle"); return GEN75_SCS_ZERO;
     }
 }
 
 static void surface_state_tex_gen7(const struct intel_gpu *gpu,
                                    const struct intel_img *img,
-                                   XGL_IMAGE_VIEW_TYPE type,
-                                   XGL_FORMAT format,
+                                   VK_IMAGE_VIEW_TYPE type,
+                                   VK_FORMAT format,
                                    unsigned first_level,
                                    unsigned num_levels,
                                    unsigned first_layer,
                                    unsigned num_layers,
-                                   XGL_CHANNEL_MAPPING swizzles,
+                                   VK_CHANNEL_MAPPING swizzles,
                                    bool is_rt,
                                    uint32_t dw[8])
 {
@@ -259,7 +259,7 @@
 
    width = img->layout.width0;
    height = img->layout.height0;
-   depth = (type == XGL_IMAGE_VIEW_3D) ?
+   depth = (type == VK_IMAGE_VIEW_3D) ?
       img->depth : num_layers;
    pitch = img->layout.bo_stride;
 
@@ -436,10 +436,10 @@
           channel_swizzle_to_scs(swizzles.b) << GEN75_SURFACE_DW7_SCS_B__SHIFT |
           channel_swizzle_to_scs(swizzles.a) << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    } else {
-        assert(swizzles.r == XGL_CHANNEL_SWIZZLE_R &&
-               swizzles.g == XGL_CHANNEL_SWIZZLE_G &&
-               swizzles.b == XGL_CHANNEL_SWIZZLE_B &&
-               swizzles.a == XGL_CHANNEL_SWIZZLE_A);
+        assert(swizzles.r == VK_CHANNEL_SWIZZLE_R &&
+               swizzles.g == VK_CHANNEL_SWIZZLE_G &&
+               swizzles.b == VK_CHANNEL_SWIZZLE_B &&
+               swizzles.a == VK_CHANNEL_SWIZZLE_A);
    }
 }
 
@@ -484,7 +484,7 @@
 static void surface_state_buf_gen6(const struct intel_gpu *gpu,
                                    unsigned offset, unsigned size,
                                    unsigned struct_size,
-                                   XGL_FORMAT elem_format,
+                                   VK_FORMAT elem_format,
                                    bool is_rt, bool render_cache_rw,
                                    uint32_t dw[6])
 {
@@ -570,8 +570,8 @@
 
 static void surface_state_tex_gen6(const struct intel_gpu *gpu,
                                    const struct intel_img *img,
-                                   XGL_IMAGE_VIEW_TYPE type,
-                                   XGL_FORMAT format,
+                                   VK_IMAGE_VIEW_TYPE type,
+                                   VK_FORMAT format,
                                    unsigned first_level,
                                    unsigned num_levels,
                                    unsigned first_layer,
@@ -592,7 +592,7 @@
 
    width = img->layout.width0;
    height = img->layout.height0;
-   depth = (type == XGL_IMAGE_VIEW_3D) ?
+   depth = (type == VK_IMAGE_VIEW_3D) ?
       img->depth : num_layers;
    pitch = img->layout.bo_stride;
 
@@ -754,7 +754,7 @@
 static void
 ds_init_info(const struct intel_gpu *gpu,
              const struct intel_img *img,
-             XGL_FORMAT format, unsigned level,
+             VK_FORMAT format, unsigned level,
              unsigned first_layer, unsigned num_layers,
              struct ds_surface_info *info)
 {
@@ -817,18 +817,18 @@
     * As for GEN7+, separate_stencil is always true.
     */
    switch (format) {
-   case XGL_FMT_D16_UNORM:
+   case VK_FMT_D16_UNORM:
       info->format = GEN6_ZFORMAT_D16_UNORM;
       break;
-   case XGL_FMT_D32_SFLOAT:
+   case VK_FMT_D32_SFLOAT:
       info->format = GEN6_ZFORMAT_D32_FLOAT;
       break;
-   case XGL_FMT_D32_SFLOAT_S8_UINT:
+   case VK_FMT_D32_SFLOAT_S8_UINT:
       info->format = (separate_stencil) ?
          GEN6_ZFORMAT_D32_FLOAT :
          GEN6_ZFORMAT_D32_FLOAT_S8X24_UINT;
       break;
-   case XGL_FMT_S8_UINT:
+   case VK_FMT_S8_UINT:
       if (separate_stencil) {
          info->format = GEN6_ZFORMAT_D32_FLOAT;
          break;
@@ -841,7 +841,7 @@
       break;
    }
 
-   if (format != XGL_FMT_S8_UINT)
+   if (format != VK_FMT_S8_UINT)
       info->zs.stride = img->layout.bo_stride;
 
    if (img->s8_layout) {
@@ -866,7 +866,7 @@
          intel_layout_pos_to_mem(img->s8_layout, x, y, &x, &y);
          info->stencil.offset = intel_layout_mem_to_raw(img->s8_layout, x, y);
       }
-   } else if (format == XGL_FMT_S8_UINT) {
+   } else if (format == VK_FMT_S8_UINT) {
       info->stencil.stride = img->layout.bo_stride * 2;
    }
 
@@ -880,7 +880,7 @@
 
    info->width = img->layout.width0;
    info->height = img->layout.height0;
-   info->depth = (img->type == XGL_IMAGE_3D) ?
+   info->depth = (img->type == VK_IMAGE_3D) ?
       img->depth : num_layers;
 
    info->lod = level;
@@ -891,7 +891,7 @@
 static void ds_view_init(struct intel_ds_view *view,
                          const struct intel_gpu *gpu,
                          const struct intel_img *img,
-                         XGL_FORMAT format, unsigned level,
+                         VK_FORMAT format, unsigned level,
                          unsigned first_layer, unsigned num_layers)
 {
    const int max_2d_size U_ASSERT_ONLY =
@@ -1065,25 +1065,25 @@
     intel_buf_view_destroy(view);
 }
 
-XGL_RESULT intel_buf_view_create(struct intel_dev *dev,
-                                 const XGL_BUFFER_VIEW_CREATE_INFO *info,
+VK_RESULT intel_buf_view_create(struct intel_dev *dev,
+                                 const VK_BUFFER_VIEW_CREATE_INFO *info,
                                  struct intel_buf_view **view_ret)
 {
     struct intel_buf *buf = intel_buf(info->buffer);
     const bool will_write = (buf->usage &
-            (XGL_BUFFER_USAGE_SHADER_ACCESS_WRITE_BIT |
-             XGL_BUFFER_USAGE_SHADER_ACCESS_ATOMIC_BIT));
-    XGL_FORMAT format;
-    XGL_GPU_SIZE stride;
+            (VK_BUFFER_USAGE_SHADER_ACCESS_WRITE_BIT |
+             VK_BUFFER_USAGE_SHADER_ACCESS_ATOMIC_BIT));
+    VK_FORMAT format;
+    VK_GPU_SIZE stride;
     uint32_t *cmd;
     struct intel_buf_view *view;
     int i;
 
     view = (struct intel_buf_view *) intel_base_create(&dev->base.handle,
-            sizeof(*view), dev->base.dbg, XGL_DBG_OBJECT_BUFFER_VIEW,
+            sizeof(*view), dev->base.dbg, VK_DBG_OBJECT_BUFFER_VIEW,
             info, 0);
     if (!view)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     view->obj.destroy = buf_view_destroy;
 
@@ -1092,10 +1092,10 @@
     /*
      * The compiler expects uniform buffers to have pitch of
      * 4 for fragment shaders, but 16 for other stages.  The format
-     * must be XGL_FMT_R32G32B32A32_SFLOAT.
+     * must be VK_FMT_R32G32B32A32_SFLOAT.
      */
-    if (info->viewType == XGL_BUFFER_VIEW_RAW) {
-        format = XGL_FMT_R32G32B32A32_SFLOAT;
+    if (info->viewType == VK_BUFFER_VIEW_RAW) {
+        format = VK_FMT_R32G32B32A32_SFLOAT;
         stride = 16;
     } else {
         format = info->format;
@@ -1117,7 +1117,7 @@
         }
 
         /* switch to view->fs_cmd */
-        if (info->viewType == XGL_BUFFER_VIEW_RAW) {
+        if (info->viewType == VK_BUFFER_VIEW_RAW) {
             cmd = view->fs_cmd;
             stride = 4;
         } else {
@@ -1128,7 +1128,7 @@
 
     *view_ret = view;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_buf_view_destroy(struct intel_buf_view *view)
@@ -1143,20 +1143,20 @@
     intel_img_view_destroy(view);
 }
 
-XGL_RESULT intel_img_view_create(struct intel_dev *dev,
-                                 const XGL_IMAGE_VIEW_CREATE_INFO *info,
+VK_RESULT intel_img_view_create(struct intel_dev *dev,
+                                 const VK_IMAGE_VIEW_CREATE_INFO *info,
                                  struct intel_img_view **view_ret)
 {
     struct intel_img *img = intel_img(info->image);
     struct intel_img_view *view;
     uint32_t mip_levels, array_size;
-    XGL_CHANNEL_MAPPING state_swizzles;
+    VK_CHANNEL_MAPPING state_swizzles;
 
     if (info->subresourceRange.baseMipLevel >= img->mip_levels ||
         info->subresourceRange.baseArraySlice >= img->array_size ||
         !info->subresourceRange.mipLevels ||
         !info->subresourceRange.arraySize)
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
 
     mip_levels = info->subresourceRange.mipLevels;
     if (mip_levels > img->mip_levels - info->subresourceRange.baseMipLevel)
@@ -1167,9 +1167,9 @@
         array_size = img->array_size - info->subresourceRange.baseArraySlice;
 
     view = (struct intel_img_view *) intel_base_create(&dev->base.handle,
-            sizeof(*view), dev->base.dbg, XGL_DBG_OBJECT_IMAGE_VIEW, info, 0);
+            sizeof(*view), dev->base.dbg, VK_DBG_OBJECT_IMAGE_VIEW, info, 0);
     if (!view)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     view->obj.destroy = img_view_destroy;
 
@@ -1178,25 +1178,25 @@
 
     if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7.5)) {
         state_swizzles = info->channels;
-        view->shader_swizzles.r = XGL_CHANNEL_SWIZZLE_R;
-        view->shader_swizzles.g = XGL_CHANNEL_SWIZZLE_G;
-        view->shader_swizzles.b = XGL_CHANNEL_SWIZZLE_B;
-        view->shader_swizzles.a = XGL_CHANNEL_SWIZZLE_A;
+        view->shader_swizzles.r = VK_CHANNEL_SWIZZLE_R;
+        view->shader_swizzles.g = VK_CHANNEL_SWIZZLE_G;
+        view->shader_swizzles.b = VK_CHANNEL_SWIZZLE_B;
+        view->shader_swizzles.a = VK_CHANNEL_SWIZZLE_A;
     } else {
-        state_swizzles.r = XGL_CHANNEL_SWIZZLE_R;
-        state_swizzles.g = XGL_CHANNEL_SWIZZLE_G;
-        state_swizzles.b = XGL_CHANNEL_SWIZZLE_B;
-        state_swizzles.a = XGL_CHANNEL_SWIZZLE_A;
+        state_swizzles.r = VK_CHANNEL_SWIZZLE_R;
+        state_swizzles.g = VK_CHANNEL_SWIZZLE_G;
+        state_swizzles.b = VK_CHANNEL_SWIZZLE_B;
+        state_swizzles.a = VK_CHANNEL_SWIZZLE_A;
         view->shader_swizzles = info->channels;
     }
 
     /* shader_swizzles is ignored by the compiler */
-    if (view->shader_swizzles.r != XGL_CHANNEL_SWIZZLE_R ||
-        view->shader_swizzles.g != XGL_CHANNEL_SWIZZLE_G ||
-        view->shader_swizzles.b != XGL_CHANNEL_SWIZZLE_B ||
-        view->shader_swizzles.a != XGL_CHANNEL_SWIZZLE_A) {
-        intel_dev_log(dev, XGL_DBG_MSG_WARNING,
-                XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
+    if (view->shader_swizzles.r != VK_CHANNEL_SWIZZLE_R ||
+        view->shader_swizzles.g != VK_CHANNEL_SWIZZLE_G ||
+        view->shader_swizzles.b != VK_CHANNEL_SWIZZLE_B ||
+        view->shader_swizzles.a != VK_CHANNEL_SWIZZLE_A) {
+        intel_dev_log(dev, VK_DBG_MSG_WARNING,
+                VK_VALIDATION_LEVEL_0, VK_NULL_HANDLE, 0, 0,
                 "image data swizzling is ignored");
     }
 
@@ -1216,7 +1216,7 @@
 
     *view_ret = view;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_img_view_destroy(struct intel_img_view *view)
@@ -1231,24 +1231,24 @@
     intel_rt_view_destroy(view);
 }
 
-XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
-                                const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
-                                struct intel_rt_view **view_ret)
+VK_RESULT intel_rt_view_create(struct intel_dev *dev,
+                               const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
+                               struct intel_rt_view **view_ret)
 {
-    static const XGL_CHANNEL_MAPPING identity_channel_mapping = {
-        .r = XGL_CHANNEL_SWIZZLE_R,
-        .g = XGL_CHANNEL_SWIZZLE_G,
-        .b = XGL_CHANNEL_SWIZZLE_B,
-        .a = XGL_CHANNEL_SWIZZLE_A,
+    static const VK_CHANNEL_MAPPING identity_channel_mapping = {
+        .r = VK_CHANNEL_SWIZZLE_R,
+        .g = VK_CHANNEL_SWIZZLE_G,
+        .b = VK_CHANNEL_SWIZZLE_B,
+        .a = VK_CHANNEL_SWIZZLE_A,
     };
     struct intel_img *img = intel_img(info->image);
     struct intel_rt_view *view;
 
     view = (struct intel_rt_view *) intel_base_create(&dev->base.handle,
-            sizeof(*view), dev->base.dbg, XGL_DBG_OBJECT_COLOR_TARGET_VIEW,
+            sizeof(*view), dev->base.dbg, VK_DBG_OBJECT_COLOR_TARGET_VIEW,
             info, 0);
     if (!view)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     view->obj.destroy = rt_view_destroy;
 
@@ -1274,7 +1274,7 @@
 
     *view_ret = view;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_rt_view_destroy(struct intel_rt_view *view)
@@ -1289,18 +1289,18 @@
     intel_ds_view_destroy(view);
 }
 
-XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
-                                const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
-                                struct intel_ds_view **view_ret)
+VK_RESULT intel_ds_view_create(struct intel_dev *dev,
+                               const VK_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
+                               struct intel_ds_view **view_ret)
 {
     struct intel_img *img = intel_img(info->image);
     struct intel_ds_view *view;
 
     view = (struct intel_ds_view *) intel_base_create(&dev->base.handle,
-            sizeof(*view), dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW,
+            sizeof(*view), dev->base.dbg, VK_DBG_OBJECT_DEPTH_STENCIL_VIEW,
             info, 0);
     if (!view)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     view->obj.destroy = ds_view_destroy;
 
@@ -1313,7 +1313,7 @@
 
     *view_ret = view;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_ds_view_destroy(struct intel_ds_view *view)
@@ -1321,10 +1321,10 @@
     intel_base_destroy(&view->obj.base);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateBufferView(
-    XGL_DEVICE                                  device,
-    const XGL_BUFFER_VIEW_CREATE_INFO*          pCreateInfo,
-    XGL_BUFFER_VIEW*                            pView)
+ICD_EXPORT VK_RESULT VKAPI vkCreateBufferView(
+    VK_DEVICE                                   device,
+    const VK_BUFFER_VIEW_CREATE_INFO*           pCreateInfo,
+    VK_BUFFER_VIEW*                             pView)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -1332,10 +1332,10 @@
             (struct intel_buf_view **) pView);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateImageView(
-    XGL_DEVICE                                  device,
-    const XGL_IMAGE_VIEW_CREATE_INFO*           pCreateInfo,
-    XGL_IMAGE_VIEW*                             pView)
+ICD_EXPORT VK_RESULT VKAPI vkCreateImageView(
+    VK_DEVICE                                   device,
+    const VK_IMAGE_VIEW_CREATE_INFO*            pCreateInfo,
+    VK_IMAGE_VIEW*                              pView)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -1343,10 +1343,10 @@
             (struct intel_img_view **) pView);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateColorAttachmentView(
-    XGL_DEVICE                                  device,
-    const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
-    XGL_COLOR_ATTACHMENT_VIEW*                  pView)
+ICD_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(
+    VK_DEVICE                                   device,
+    const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
+    VK_COLOR_ATTACHMENT_VIEW*                   pView)
 {
     struct intel_dev *dev = intel_dev(device);
 
@@ -1354,10 +1354,10 @@
             (struct intel_rt_view **) pView);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDepthStencilView(
-    XGL_DEVICE                                  device,
-    const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO*   pCreateInfo,
-    XGL_DEPTH_STENCIL_VIEW*                     pView)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(
+    VK_DEVICE                                   device,
+    const VK_DEPTH_STENCIL_VIEW_CREATE_INFO*    pCreateInfo,
+    VK_DEPTH_STENCIL_VIEW*                      pView)
 {
     struct intel_dev *dev = intel_dev(device);
 
diff --git a/icd/intel/view.h b/icd/intel/view.h
index 2684fd8..0d824b2 100644
--- a/icd/intel/view.h
+++ b/icd/intel/view.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -59,7 +59,7 @@
     struct intel_img *img;
 
     float min_lod;
-    XGL_CHANNEL_MAPPING shader_swizzles;
+    VK_CHANNEL_MAPPING shader_swizzles;
 
     /* SURFACE_STATE */
     uint32_t cmd[8];
@@ -95,7 +95,7 @@
     bool has_hiz;
 };
 
-static inline struct intel_buf_view *intel_buf_view(XGL_BUFFER_VIEW view)
+static inline struct intel_buf_view *intel_buf_view(VK_BUFFER_VIEW view)
 {
     return (struct intel_buf_view *) view;
 }
@@ -105,7 +105,7 @@
     return (struct intel_buf_view *) obj;
 }
 
-static inline struct intel_img_view *intel_img_view(XGL_IMAGE_VIEW view)
+static inline struct intel_img_view *intel_img_view(VK_IMAGE_VIEW view)
 {
     return (struct intel_img_view *) view;
 }
@@ -115,7 +115,7 @@
     return (struct intel_img_view *) obj;
 }
 
-static inline struct intel_rt_view *intel_rt_view(XGL_COLOR_ATTACHMENT_VIEW view)
+static inline struct intel_rt_view *intel_rt_view(VK_COLOR_ATTACHMENT_VIEW view)
 {
     return (struct intel_rt_view *) view;
 }
@@ -125,7 +125,7 @@
     return (struct intel_rt_view *) obj;
 }
 
-static inline struct intel_ds_view *intel_ds_view(XGL_DEPTH_STENCIL_VIEW view)
+static inline struct intel_ds_view *intel_ds_view(VK_DEPTH_STENCIL_VIEW view)
 {
     return (struct intel_ds_view *) view;
 }
@@ -138,24 +138,24 @@
 void intel_null_view_init(struct intel_null_view *view,
                           struct intel_dev *dev);
 
-XGL_RESULT intel_buf_view_create(struct intel_dev *dev,
-                                 const XGL_BUFFER_VIEW_CREATE_INFO *info,
-                                 struct intel_buf_view **view_ret);
+VK_RESULT intel_buf_view_create(struct intel_dev *dev,
+                                const VK_BUFFER_VIEW_CREATE_INFO *info,
+                                struct intel_buf_view **view_ret);
 
 void intel_buf_view_destroy(struct intel_buf_view *view);
 
-XGL_RESULT intel_img_view_create(struct intel_dev *dev,
-                                 const XGL_IMAGE_VIEW_CREATE_INFO *info,
-                                 struct intel_img_view **view_ret);
+VK_RESULT intel_img_view_create(struct intel_dev *dev,
+                                const VK_IMAGE_VIEW_CREATE_INFO *info,
+                                struct intel_img_view **view_ret);
 void intel_img_view_destroy(struct intel_img_view *view);
 
-XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
-                                const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
-                                struct intel_rt_view **view_ret);
+VK_RESULT intel_rt_view_create(struct intel_dev *dev,
+                               const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
+                               struct intel_rt_view **view_ret);
 void intel_rt_view_destroy(struct intel_rt_view *view);
 
-XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
-                                const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
-                                struct intel_ds_view **view_ret);
+VK_RESULT intel_ds_view_create(struct intel_dev *dev,
+                               const VK_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
+                               struct intel_ds_view **view_ret);
 void intel_ds_view_destroy(struct intel_ds_view *view);
 
diff --git a/icd/intel/wsi.h b/icd/intel/wsi.h
index ab2a250..3342b90 100644
--- a/icd/intel/wsi.h
+++ b/icd/intel/wsi.h
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2015 LunarG, Inc.
  *
@@ -34,19 +34,19 @@
 struct intel_gpu;
 struct intel_img;
 
-XGL_RESULT intel_wsi_gpu_get_info(struct intel_gpu *gpu,
-                                  XGL_PHYSICAL_GPU_INFO_TYPE type,
-                                  size_t *size, void *data);
+VK_RESULT intel_wsi_gpu_get_info(struct intel_gpu *gpu,
+                                 VK_PHYSICAL_GPU_INFO_TYPE type,
+                                 size_t *size, void *data);
 void intel_wsi_gpu_cleanup(struct intel_gpu *gpu);
 
-XGL_RESULT intel_wsi_img_init(struct intel_img *img);
+VK_RESULT intel_wsi_img_init(struct intel_img *img);
 void intel_wsi_img_cleanup(struct intel_img *img);
 
-XGL_RESULT intel_wsi_fence_init(struct intel_fence *fence);
+VK_RESULT intel_wsi_fence_init(struct intel_fence *fence);
 void intel_wsi_fence_cleanup(struct intel_fence *fence);
 void intel_wsi_fence_copy(struct intel_fence *fence,
                           const struct intel_fence *src);
-XGL_RESULT intel_wsi_fence_wait(struct intel_fence *fence,
-                                int64_t timeout_ns);
+VK_RESULT intel_wsi_fence_wait(struct intel_fence *fence,
+                               int64_t timeout_ns);
 
 #endif /* WSI_H */
diff --git a/icd/intel/wsi_null.c b/icd/intel/wsi_null.c
index 156a0b6..9f5bd5f 100644
--- a/icd/intel/wsi_null.c
+++ b/icd/intel/wsi_null.c
@@ -27,29 +27,29 @@
 
 #include "wsi.h"
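+
+/*
+ * Null WSI backend, built when no window system is available (see the
+ * wsi_null.c conditional in CMakeLists): the image and fence hooks are
+ * no-op successes, and the X11 entry points report VK_ERROR_UNKNOWN.
+ */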
 
-XGL_RESULT intel_wsi_gpu_get_info(struct intel_gpu *gpu,
-                                  XGL_PHYSICAL_GPU_INFO_TYPE type,
-                                  size_t *size, void *data)
+VK_RESULT intel_wsi_gpu_get_info(struct intel_gpu *gpu,
+                                 VK_PHYSICAL_GPU_INFO_TYPE type,
+                                 size_t *size, void *data)
 {
-    return XGL_ERROR_INVALID_VALUE;
+    return VK_ERROR_INVALID_VALUE;
 }
 
 void intel_wsi_gpu_cleanup(struct intel_gpu *gpu)
 {
 }
 
-XGL_RESULT intel_wsi_img_init(struct intel_img *img)
+VK_RESULT intel_wsi_img_init(struct intel_img *img)
 {
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_wsi_img_cleanup(struct intel_img *img)
 {
 }
 
-XGL_RESULT intel_wsi_fence_init(struct intel_fence *fence)
+VK_RESULT intel_wsi_fence_init(struct intel_fence *fence)
 {
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_wsi_fence_cleanup(struct intel_fence *fence)
@@ -61,41 +61,41 @@
 {
 }
 
-XGL_RESULT intel_wsi_fence_wait(struct intel_fence *fence,
-                                int64_t timeout_ns)
+VK_RESULT intel_wsi_fence_wait(struct intel_fence *fence,
+                               int64_t timeout_ns)
 {
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11AssociateConnection(
-    XGL_PHYSICAL_GPU                            gpu_,
-    const XGL_WSI_X11_CONNECTION_INFO*          pConnectionInfo)
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11AssociateConnection(
+    VK_PHYSICAL_GPU                             gpu_,
+    const VK_WSI_X11_CONNECTION_INFO*           pConnectionInfo)
 {
-    return XGL_ERROR_UNKNOWN;
+    return VK_ERROR_UNKNOWN;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11GetMSC(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11GetMSC(
+    VK_DEVICE                                   device,
     xcb_window_t                                window,
     xcb_randr_crtc_t                            crtc,
     uint64_t  *                                 pMsc)
 {
-    return XGL_ERROR_UNKNOWN;
+    return VK_ERROR_UNKNOWN;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11CreatePresentableImage(
-    XGL_DEVICE                                  device,
-    const XGL_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
-    XGL_IMAGE*                                  pImage,
-    XGL_GPU_MEMORY*                             pMem)
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(
+    VK_DEVICE                                   device,
+    const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
+    VK_IMAGE*                                   pImage,
+    VK_GPU_MEMORY*                              pMem)
 {
-    return XGL_ERROR_UNKNOWN;
+    return VK_ERROR_UNKNOWN;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11QueuePresent(
-    XGL_QUEUE                                   queue_,
-    const XGL_WSI_X11_PRESENT_INFO*             pPresentInfo,
-    XGL_FENCE                                   fence_)
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11QueuePresent(
+    VK_QUEUE                                    queue_,
+    const VK_WSI_X11_PRESENT_INFO*              pPresentInfo,
+    VK_FENCE                                    fence_)
 {
-    return XGL_ERROR_UNKNOWN;
+    return VK_ERROR_UNKNOWN;
 }
diff --git a/icd/intel/wsi_x11.c b/icd/intel/wsi_x11.c
index 59bb360..1575da4 100644
--- a/icd/intel/wsi_x11.c
+++ b/icd/intel/wsi_x11.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -50,8 +50,8 @@
     uint32_t connector_id;
 
     char name[32];
-    XGL_EXTENT2D physical_dimension;
-    XGL_EXTENT2D physical_resolution;
+    VK_EXTENT2D physical_dimension;
+    VK_EXTENT2D physical_resolution;
 
     drmModeModeInfoPtr modes;
     uint32_t mode_count;
@@ -103,13 +103,13 @@
 };
 
 static bool x11_is_format_presentable(const struct intel_dev *dev,
-                                      XGL_FORMAT format)
+                                      VK_FORMAT format)
 {
     /* this is what DDX expects */
     switch (format) {
-    case XGL_FMT_B5G6R5_UNORM:
-    case XGL_FMT_B8G8R8A8_UNORM:
-    case XGL_FMT_B8G8R8A8_SRGB:
+    case VK_FMT_B5G6R5_UNORM:
+    case VK_FMT_B8G8R8A8_UNORM:
+    case VK_FMT_B8G8R8A8_SRGB:
         return true;
     default:
         return false;
@@ -295,7 +295,7 @@
 /**
  * Send a PresentSelectInput to select interested events.
  */
-static XGL_RESULT x11_swap_chain_present_select_input(struct intel_x11_swap_chain *sc)
+static VK_RESULT x11_swap_chain_present_select_input(struct intel_x11_swap_chain *sc)
 {
     xcb_void_cookie_t cookie;
     xcb_generic_error_t *error;
@@ -312,56 +312,56 @@
     error = xcb_request_check(sc->c, cookie);
     if (error) {
         free(error);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-static XGL_RESULT wsi_x11_dri3_pixmap_from_buffer(struct intel_wsi_x11 *x11,
-                                                  struct intel_dev *dev,
-                                                  struct intel_img *img,
-                                                  struct intel_mem *mem)
+static VK_RESULT wsi_x11_dri3_pixmap_from_buffer(struct intel_wsi_x11 *x11,
+                                                 struct intel_dev *dev,
+                                                 struct intel_img *img,
+                                                 struct intel_mem *mem)
 {
     struct intel_x11_img_data *data =
         (struct intel_x11_img_data *) img->wsi_data;
 
     data->prime_fd = x11_export_prime_fd(dev, mem->bo, &img->layout);
     if (data->prime_fd < 0)
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
 
     data->pixmap = x11_dri3_pixmap_from_buffer(x11->c, x11->root,
             x11->root_depth, data->prime_fd, &img->layout);
 
     data->mem = mem;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 /**
  * Create a presentable image.
  */
-static XGL_RESULT wsi_x11_img_create(struct intel_wsi_x11 *x11,
-                                     struct intel_dev *dev,
-                                     const XGL_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO *info,
-                                     struct intel_img **img_ret)
+static VK_RESULT wsi_x11_img_create(struct intel_wsi_x11 *x11,
+                                    struct intel_dev *dev,
+                                    const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO *info,
+                                    struct intel_img **img_ret)
 {
-    XGL_IMAGE_CREATE_INFO img_info;
-    XGL_MEMORY_ALLOC_INFO mem_info;
+    VK_IMAGE_CREATE_INFO img_info;
+    VK_MEMORY_ALLOC_INFO mem_info;
     struct intel_img *img;
     struct intel_mem *mem;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     if (!x11_is_format_presentable(dev, info->format)) {
-        intel_dev_log(dev, XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0,
-                XGL_NULL_HANDLE, 0, 0, "invalid presentable image format");
-        return XGL_ERROR_INVALID_VALUE;
+        intel_dev_log(dev, VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0,
+                VK_NULL_HANDLE, 0, 0, "invalid presentable image format");
+        return VK_ERROR_INVALID_VALUE;
     }
 
     /* create image */
     memset(&img_info, 0, sizeof(img_info));
-    img_info.sType = XGL_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
-    img_info.imageType = XGL_IMAGE_2D;
+    img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+    img_info.imageType = VK_IMAGE_2D;
     img_info.format = info->format;
     img_info.extent.width = info->extent.width;
     img_info.extent.height = info->extent.height;
@@ -369,30 +369,30 @@
     img_info.mipLevels = 1;
     img_info.arraySize = 1;
     img_info.samples = 1;
-    img_info.tiling = XGL_OPTIMAL_TILING;
+    img_info.tiling = VK_OPTIMAL_TILING;
     img_info.usage = info->usage;
     img_info.flags = 0;
 
     ret = intel_img_create(dev, &img_info, true, &img);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     /* allocate memory */
     memset(&mem_info, 0, sizeof(mem_info));
-    mem_info.sType = XGL_STRUCTURE_TYPE_MEMORY_ALLOC_INFO;
+    mem_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO;
     mem_info.allocationSize = img->total_size;
     mem_info.memProps =  0;
-    mem_info.memType = XGL_MEMORY_TYPE_IMAGE;
-    mem_info.memPriority = XGL_MEMORY_PRIORITY_HIGH;
+    mem_info.memType = VK_MEMORY_TYPE_IMAGE;
+    mem_info.memPriority = VK_MEMORY_PRIORITY_HIGH;
 
     ret = intel_mem_alloc(dev, &mem_info, &mem);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_img_destroy(img);
         return ret;
     }
 
     ret = wsi_x11_dri3_pixmap_from_buffer(x11, dev, img, mem);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_mem_free(mem);
         intel_img_destroy(img);
         return ret;
@@ -402,14 +402,14 @@
 
     *img_ret = img;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 /**
  * Send a PresentPixmap.
  */
-static XGL_RESULT x11_swap_chain_present_pixmap(struct intel_x11_swap_chain *sc,
-                                                const XGL_WSI_X11_PRESENT_INFO *info)
+static VK_RESULT x11_swap_chain_present_pixmap(struct intel_x11_swap_chain *sc,
+                                               const VK_WSI_X11_PRESENT_INFO *info)
 {
     struct intel_img *img = intel_img(info->srcImage);
     struct intel_x11_img_data *data =
@@ -443,10 +443,10 @@
     err = xcb_request_check(sc->c, cookie);
     if (err) {
         free(err);
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 /**
@@ -481,7 +481,7 @@
     }
 }
 
-static XGL_RESULT x11_swap_chain_wait(struct intel_x11_swap_chain *sc,
-                                      uint32_t serial, int64_t timeout)
+static VK_RESULT x11_swap_chain_wait(struct intel_x11_swap_chain *sc,
+                                     uint32_t serial, int64_t timeout)
 {
     const bool wait = (timeout != 0);
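+    /* timeout == 0 polls for already-completed presents; any non-zero
+     * timeout blocks on the event queue (the exact timeout value is not
+     * honored here) */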
@@ -493,12 +493,12 @@
             ev = (xcb_present_generic_event_t *)
                 xcb_wait_for_special_event(sc->c, sc->present_special_event);
             if (!ev)
-                return XGL_ERROR_UNKNOWN;
+                return VK_ERROR_UNKNOWN;
         } else {
             ev = (xcb_present_generic_event_t *)
                 xcb_poll_for_special_event(sc->c, sc->present_special_event);
             if (!ev)
-                return XGL_NOT_READY;
+                return VK_NOT_READY;
         }
 
         x11_swap_chain_present_event(sc, ev);
@@ -506,7 +506,7 @@
         free(ev);
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void x11_swap_chain_destroy(struct intel_x11_swap_chain *sc)
@@ -531,7 +531,7 @@
 }
 
 static struct intel_wsi_x11 *wsi_x11_create(struct intel_gpu *gpu,
-                                            const XGL_WSI_X11_CONNECTION_INFO *info)
+                                            const VK_WSI_X11_CONNECTION_INFO *info)
 {
     struct intel_wsi_x11 *x11;
     int depth, fd;
@@ -550,13 +550,13 @@
         return NULL;
     }
 
-    x11 = intel_alloc(gpu, sizeof(*x11), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
+    x11 = intel_alloc(gpu, sizeof(*x11), 0, VK_SYSTEM_ALLOC_API_OBJECT);
     if (!x11)
         return NULL;
 
     memset(x11, 0, sizeof(*x11));
-    /* there is no XGL_DBG_OBJECT_WSI_DISPLAY */
-    intel_handle_init(&x11->handle, XGL_DBG_OBJECT_UNKNOWN, gpu->handle.icd);
+    /* there is no VK_DBG_OBJECT_WSI_DISPLAY */
+    intel_handle_init(&x11->handle, VK_DBG_OBJECT_UNKNOWN, gpu->handle.icd);
 
     x11->c = info->pConnection;
     x11->root = info->root;
@@ -577,19 +577,19 @@
     struct intel_wsi_x11 *x11 = (struct intel_wsi_x11 *) dev->gpu->wsi_data;
     struct intel_x11_swap_chain *sc;
 
-    sc = intel_alloc(dev, sizeof(*sc), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
+    sc = intel_alloc(dev, sizeof(*sc), 0, VK_SYSTEM_ALLOC_API_OBJECT);
     if (!sc)
         return NULL;
 
     memset(sc, 0, sizeof(*sc));
-    /* there is no XGL_DBG_OBJECT_WSI_SWAP_CHAIN */
-    intel_handle_init(&sc->handle, XGL_DBG_OBJECT_UNKNOWN,
+    /* there is no VK_DBG_OBJECT_WSI_SWAP_CHAIN */
+    intel_handle_init(&sc->handle, VK_DBG_OBJECT_UNKNOWN,
             dev->base.handle.icd);
 
     sc->c = x11->c;
     sc->window = window;
 
-    if (x11_swap_chain_present_select_input(sc) != XGL_SUCCESS) {
+    if (x11_swap_chain_present_select_input(sc) != VK_SUCCESS) {
         intel_free(dev, sc);
         return NULL;
     }
@@ -621,18 +621,18 @@
     return sc;
 }
 
-static XGL_RESULT intel_wsi_gpu_init(struct intel_gpu *gpu,
-                                     const XGL_WSI_X11_CONNECTION_INFO *info)
+static VK_RESULT intel_wsi_gpu_init(struct intel_gpu *gpu,
+                                    const VK_WSI_X11_CONNECTION_INFO *info)
 {
     struct intel_wsi_x11 *x11;
 
     x11 = wsi_x11_create(gpu, info);
     if (!x11)
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
 
     gpu->wsi_data = x11;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void x11_display_init_modes(struct intel_x11_display *dpy,
@@ -644,7 +644,7 @@
         return;
 
     dpy->modes = intel_alloc(dpy, sizeof(dpy->modes[0]) * conn->count_modes,
-            0, XGL_SYSTEM_ALLOC_INTERNAL);
+            0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!dpy->modes)
         return;
 
@@ -710,13 +710,13 @@
     struct intel_x11_display *dpy;
     drmModeConnectorPtr conn;
 
-    dpy = intel_alloc(gpu, sizeof(*dpy), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
+    dpy = intel_alloc(gpu, sizeof(*dpy), 0, VK_SYSTEM_ALLOC_API_OBJECT);
     if (!dpy)
         return NULL;
 
     memset(dpy, 0, sizeof(*dpy));
-    /* there is no XGL_DBG_OBJECT_WSI_DISPLAY */
-    intel_handle_init(&dpy->handle, XGL_DBG_OBJECT_UNKNOWN, gpu->handle.icd);
+    /* there is no VK_DBG_OBJECT_WSI_DISPLAY */
+    intel_handle_init(&dpy->handle, VK_DBG_OBJECT_UNKNOWN, gpu->handle.icd);
 
     dpy->fd = fd;
     dpy->connector_id = connector_id;
@@ -750,7 +750,7 @@
         return;
 
     displays = intel_alloc(gpu, sizeof(*displays) * res->count_connectors,
-            0, XGL_SYSTEM_ALLOC_INTERNAL);
+            0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!displays) {
         drmModeFreeResources(res);
         return;
@@ -768,14 +768,14 @@
     gpu->display_count = i;
 }
 
-XGL_RESULT intel_wsi_gpu_get_info(struct intel_gpu *gpu,
-                                  XGL_PHYSICAL_GPU_INFO_TYPE type,
+VK_RESULT intel_wsi_gpu_get_info(struct intel_gpu *gpu,
+                                  VK_PHYSICAL_GPU_INFO_TYPE type,
                                   size_t *size, void *data)
 {
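+    /* display scanning is wired up but intentionally disabled; no WSI
+     * info is reported yet, so every query is rejected */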
     if (false)
         x11_display_scan(gpu);
 
-    return XGL_ERROR_INVALID_VALUE;
+    return VK_ERROR_INVALID_VALUE;
 }
 
 void intel_wsi_gpu_cleanup(struct intel_gpu *gpu)
@@ -797,20 +797,20 @@
     }
 }
 
-XGL_RESULT intel_wsi_img_init(struct intel_img *img)
+VK_RESULT intel_wsi_img_init(struct intel_img *img)
 {
     struct intel_x11_img_data *data;
 
-    data = intel_alloc(img, sizeof(*data), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+    data = intel_alloc(img, sizeof(*data), 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!data)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     memset(data, 0, sizeof(*data));
 
     assert(!img->wsi_data);
     img->wsi_data = data;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_wsi_img_cleanup(struct intel_img *img)
@@ -826,20 +826,20 @@
     intel_free(img, img->wsi_data);
 }
 
-XGL_RESULT intel_wsi_fence_init(struct intel_fence *fence)
+VK_RESULT intel_wsi_fence_init(struct intel_fence *fence)
 {
     struct intel_x11_fence_data *data;
 
-    data = intel_alloc(fence, sizeof(*data), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+    data = intel_alloc(fence, sizeof(*data), 0, VK_SYSTEM_ALLOC_INTERNAL);
     if (!data)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     memset(data, 0, sizeof(*data));
 
     assert(!fence->wsi_data);
     fence->wsi_data = data;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_wsi_fence_cleanup(struct intel_fence *fence)
@@ -854,77 +854,77 @@
             sizeof(struct intel_x11_fence_data));
 }
 
-XGL_RESULT intel_wsi_fence_wait(struct intel_fence *fence,
-                                int64_t timeout_ns)
+VK_RESULT intel_wsi_fence_wait(struct intel_fence *fence,
+                               int64_t timeout_ns)
 {
     struct intel_x11_fence_data *data =
         (struct intel_x11_fence_data *) fence->wsi_data;
 
     if (!data->swap_chain)
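+        /* never attached to a swap chain by a present; nothing to wait for */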
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 
     return x11_swap_chain_wait(data->swap_chain, data->serial, timeout_ns);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11AssociateConnection(
-    XGL_PHYSICAL_GPU                            gpu_,
-    const XGL_WSI_X11_CONNECTION_INFO*          pConnectionInfo)
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11AssociateConnection(
+    VK_PHYSICAL_GPU                             gpu_,
+    const VK_WSI_X11_CONNECTION_INFO*           pConnectionInfo)
 {
     struct intel_gpu *gpu = intel_gpu(gpu_);
 
     return intel_wsi_gpu_init(gpu, pConnectionInfo);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11GetMSC(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11GetMSC(
+    VK_DEVICE                                   device,
     xcb_window_t                                window,
     xcb_randr_crtc_t                            crtc,
     uint64_t  *                                 pMsc)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_x11_swap_chain *sc;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     sc = x11_swap_chain_lookup(dev, window);
     if (!sc)
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
 
     x11_swap_chain_present_notify_msc(sc);
 
     /* wait for the event */
     ret = x11_swap_chain_wait(sc, sc->local.serial, -1);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     *pMsc = sc->remote.msc;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11CreatePresentableImage(
-    XGL_DEVICE                                  device,
-    const XGL_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
-    XGL_IMAGE*                                  pImage,
-    XGL_GPU_MEMORY*                             pMem)
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(
+    VK_DEVICE                                   device,
+    const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
+    VK_IMAGE*                                   pImage,
+    VK_GPU_MEMORY*                              pMem)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_wsi_x11 *x11 = (struct intel_wsi_x11 *) dev->gpu->wsi_data;
     struct intel_img *img;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     ret = wsi_x11_img_create(x11, dev, pCreateInfo, &img);
-    if (ret == XGL_SUCCESS) {
-        *pImage = (XGL_IMAGE) img;
-        *pMem = (XGL_GPU_MEMORY) img->obj.mem;
+    if (ret == VK_SUCCESS) {
+        *pImage = (VK_IMAGE) img;
+        *pMem = (VK_GPU_MEMORY) img->obj.mem;
     }
 
     return ret;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglWsiX11QueuePresent(
-    XGL_QUEUE                                   queue_,
-    const XGL_WSI_X11_PRESENT_INFO*             pPresentInfo,
-    XGL_FENCE                                   fence_)
+ICD_EXPORT VK_RESULT VKAPI vkWsiX11QueuePresent(
+    VK_QUEUE                                    queue_,
+    const VK_WSI_X11_PRESENT_INFO*              pPresentInfo,
+    VK_FENCE                                    fence_)
 {
     struct intel_queue *queue = intel_queue(queue_);
     struct intel_dev *dev = queue->dev;
@@ -932,24 +932,24 @@
         (struct intel_x11_fence_data *) queue->fence->wsi_data;
     struct intel_img *img = intel_img(pPresentInfo->srcImage);
     struct intel_x11_swap_chain *sc;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     sc = x11_swap_chain_lookup(dev, pPresentInfo->destWindow);
     if (!sc)
-        return XGL_ERROR_UNKNOWN;
+        return VK_ERROR_UNKNOWN;
 
     ret = x11_swap_chain_present_pixmap(sc, pPresentInfo);
-    if (ret != XGL_SUCCESS)
+    if (ret != VK_SUCCESS)
         return ret;
 
     data->swap_chain = sc;
     data->serial = sc->local.serial;
     intel_fence_set_seqno(queue->fence, img->obj.mem->bo);
 
-    if (fence_ != XGL_NULL_HANDLE) {
+    if (fence_ != VK_NULL_HANDLE) {
         struct intel_fence *fence = intel_fence(fence_);
         intel_fence_copy(fence, queue->fence);
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }