Stage 1 of the XGL -> Vulkan rename

TODO: re-enable the glave build and advance the API for glave

v2: remove outdated code in tri that was introduced by the rebase
    rename wsi_null.c (olv)
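
For reference, a minimal caller-side sketch of the renamed entry points
(hypothetical code, not part of this patch: it exercises only tokens and
struct fields that appear in the diff below; the <vulkan.h> header name
and the create_default_device helper are assumptions for illustration):

    /* Hypothetical caller sketch; the header name is an assumption
     * at this interim stage of the rename. */
    #include <vulkan.h>

    static VK_RESULT create_default_device(VK_PHYSICAL_GPU gpu,
                                           VK_DEVICE *dev_ret,
                                           VK_QUEUE *queue_ret)
    {
        /* The intel ICD accepts exactly one queue per engine node
         * (see dev_create_queues below). */
        const VK_DEVICE_QUEUE_CREATE_INFO queue_info = {
            .queueNodeIndex = 0,
            .queueCount = 1,
        };
        const VK_DEVICE_CREATE_INFO info = {
            .queueRecordCount = 1,
            .pRequestedQueues = &queue_info,
            .extensionCount = 0,
            .flags = 0,
        };
        VK_DEVICE dev;
        VK_RESULT ret;

        ret = vkCreateDevice(gpu, &info, &dev);
        if (ret != VK_SUCCESS)
            return ret;

        /* Queue node 0, index 0, matching the single queue requested. */
        ret = vkGetDeviceQueue(dev, 0, 0, queue_ret);
        if (ret != VK_SUCCESS) {
            vkDestroyDevice(dev);
            return ret;
        }

        *dev_ret = dev;
        return VK_SUCCESS;
    }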
diff --git a/icd/intel/dev.c b/icd/intel/dev.c
index 8db165b..94bcd42 100644
--- a/icd/intel/dev.c
+++ b/icd/intel/dev.c
@@ -1,5 +1,5 @@
 /*
- * XGL
+ * Vulkan
  *
  * Copyright (C) 2014 LunarG, Inc.
  *
@@ -65,18 +65,18 @@
     return true;
 }
 
-static XGL_RESULT dev_create_queues(struct intel_dev *dev,
-                                    const XGL_DEVICE_QUEUE_CREATE_INFO *queues,
+static VK_RESULT dev_create_queues(struct intel_dev *dev,
+                                    const VK_DEVICE_QUEUE_CREATE_INFO *queues,
                                     uint32_t count)
 {
     uint32_t i;
 
     if (!count)
-        return XGL_ERROR_INVALID_POINTER;
+        return VK_ERROR_INVALID_POINTER;
 
     for (i = 0; i < count; i++) {
-        const XGL_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
-        XGL_RESULT ret = XGL_SUCCESS;
+        const VK_DEVICE_QUEUE_CREATE_INFO *q = &queues[i];
+        VK_RESULT ret = VK_SUCCESS;
 
         if (q->queueNodeIndex < INTEL_GPU_ENGINE_COUNT &&
             q->queueCount == 1 && !dev->queues[q->queueNodeIndex]) {
@@ -84,10 +84,10 @@
                     &dev->queues[q->queueNodeIndex]);
         }
         else {
-            ret = XGL_ERROR_INVALID_POINTER;
+            ret = VK_ERROR_INVALID_POINTER;
         }
 
-        if (ret != XGL_SUCCESS) {
+        if (ret != VK_SUCCESS) {
             uint32_t j;
             for (j = 0; j < i; j++)
                 intel_queue_destroy(dev->queues[j]);
@@ -96,25 +96,25 @@
         }
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-XGL_RESULT intel_dev_create(struct intel_gpu *gpu,
-                            const XGL_DEVICE_CREATE_INFO *info,
+VK_RESULT intel_dev_create(struct intel_gpu *gpu,
+                            const VK_DEVICE_CREATE_INFO *info,
                             struct intel_dev **dev_ret)
 {
     struct intel_dev *dev;
     uint32_t i;
-    XGL_RESULT ret;
+    VK_RESULT ret;
 
     if (gpu->winsys)
-        return XGL_ERROR_DEVICE_ALREADY_CREATED;
+        return VK_ERROR_DEVICE_ALREADY_CREATED;
 
     dev = (struct intel_dev *) intel_base_create(&gpu->handle,
             sizeof(*dev), info->flags,
-            XGL_DBG_OBJECT_DEVICE, info, sizeof(struct intel_dev_dbg));
+            VK_DBG_OBJECT_DEVICE, info, sizeof(struct intel_dev_dbg));
     if (!dev)
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
 
     for (i = 0; i < info->extensionCount; i++) {
         const enum intel_ext_type ext = intel_gpu_lookup_extension(gpu,
@@ -127,7 +127,7 @@
     dev->gpu = gpu;
 
     ret = intel_gpu_init_winsys(gpu);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_dev_destroy(dev);
         return ret;
     }
@@ -138,16 +138,16 @@
             "command buffer scratch", 4096, false);
     if (!dev->cmd_scratch_bo) {
         intel_dev_destroy(dev);
-        return XGL_ERROR_OUT_OF_GPU_MEMORY;
+        return VK_ERROR_OUT_OF_GPU_MEMORY;
     }
 
     if (!dev_create_meta_shaders(dev)) {
         intel_dev_destroy(dev);
-        return XGL_ERROR_OUT_OF_MEMORY;
+        return VK_ERROR_OUT_OF_MEMORY;
     }
 
     ret = intel_desc_region_create(dev, &dev->desc_region);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_dev_destroy(dev);
         return ret;
     }
@@ -161,14 +161,14 @@
 
     ret = dev_create_queues(dev, info->pRequestedQueues,
             info->queueRecordCount);
-    if (ret != XGL_SUCCESS) {
+    if (ret != VK_SUCCESS) {
         intel_dev_destroy(dev);
         return ret;
     }
 
     *dev_ret = dev;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 static void dev_clear_msg_filters(struct intel_dev *dev)
@@ -212,14 +212,14 @@
         intel_gpu_cleanup_winsys(gpu);
 }
 
-XGL_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
+VK_RESULT intel_dev_add_msg_filter(struct intel_dev *dev,
                                     int32_t msg_code,
-                                    XGL_DBG_MSG_FILTER filter)
+                                    VK_DBG_MSG_FILTER filter)
 {
     struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
     struct intel_dev_dbg_msg_filter *f = dbg->filters;
 
-    assert(filter != XGL_DBG_MSG_FILTER_NONE);
+    assert(filter != VK_DBG_MSG_FILTER_NONE);
 
     while (f) {
         if (f->msg_code == msg_code)
@@ -233,9 +233,9 @@
             f->triggered = false;
         }
     } else {
-        f = intel_alloc(dev, sizeof(*f), 0, XGL_SYSTEM_ALLOC_DEBUG);
+        f = intel_alloc(dev, sizeof(*f), 0, VK_SYSTEM_ALLOC_DEBUG);
         if (!f)
-            return XGL_ERROR_OUT_OF_MEMORY;
+            return VK_ERROR_OUT_OF_MEMORY;
 
         f->msg_code = msg_code;
         f->filter = filter;
@@ -245,7 +245,7 @@
         dbg->filters = f;
     }
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
 void intel_dev_remove_msg_filter(struct intel_dev *dev,
@@ -286,10 +286,10 @@
             continue;
         }
 
-        if (filter->filter == XGL_DBG_MSG_FILTER_ALL)
+        if (filter->filter == VK_DBG_MSG_FILTER_ALL)
             return true;
 
-        if (filter->filter == XGL_DBG_MSG_FILTER_REPEATED &&
+        if (filter->filter == VK_DBG_MSG_FILTER_REPEATED &&
             filter->triggered)
             return true;
 
@@ -301,8 +301,8 @@
 }
 
 void intel_dev_log(struct intel_dev *dev,
-                   XGL_DBG_MSG_TYPE msg_type,
-                   XGL_VALIDATION_LEVEL validation_level,
+                   VK_DBG_MSG_TYPE msg_type,
+                   VK_VALIDATION_LEVEL validation_level,
                    struct intel_base *src_object,
                    size_t location,
                    int32_t msg_code,
@@ -314,61 +314,61 @@
         return;
 
     va_start(ap, format);
-    intel_logv(dev, msg_type, validation_level, (XGL_BASE_OBJECT) src_object,
+    intel_logv(dev, msg_type, validation_level, (VK_BASE_OBJECT) src_object,
             location, msg_code, format, ap);
     va_end(ap);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglCreateDevice(
-    XGL_PHYSICAL_GPU                            gpu_,
-    const XGL_DEVICE_CREATE_INFO*               pCreateInfo,
-    XGL_DEVICE*                                 pDevice)
+ICD_EXPORT VK_RESULT VKAPI vkCreateDevice(
+    VK_PHYSICAL_GPU                             gpu_,
+    const VK_DEVICE_CREATE_INFO*                pCreateInfo,
+    VK_DEVICE*                                  pDevice)
 {
     struct intel_gpu *gpu = intel_gpu(gpu_);
 
     return intel_dev_create(gpu, pCreateInfo, (struct intel_dev **) pDevice);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDestroyDevice(
-    XGL_DEVICE                                  device)
+ICD_EXPORT VK_RESULT VKAPI vkDestroyDevice(
+    VK_DEVICE                                   device)
 {
     struct intel_dev *dev = intel_dev(device);
 
     intel_dev_destroy(dev);
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglGetDeviceQueue(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(
+    VK_DEVICE                                   device,
     uint32_t                                    queueNodeIndex,
     uint32_t                                    queueIndex,
-    XGL_QUEUE*                                  pQueue)
+    VK_QUEUE*                                   pQueue)
 {
     struct intel_dev *dev = intel_dev(device);
 
     if (queueNodeIndex >= INTEL_GPU_ENGINE_COUNT) {
-        return XGL_ERROR_UNAVAILABLE;
+        return VK_ERROR_UNAVAILABLE;
     }
 
     if (queueIndex > 0)
-        return XGL_ERROR_UNAVAILABLE;
+        return VK_ERROR_UNAVAILABLE;
 
     *pQueue = dev->queues[queueNodeIndex];
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDeviceWaitIdle(
-    XGL_DEVICE                                  device)
+ICD_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(
+    VK_DEVICE                                   device)
 {
     struct intel_dev *dev = intel_dev(device);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
     uint32_t i;
 
     for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
         if (dev->queues[i]) {
-            const XGL_RESULT r = intel_queue_wait(dev->queues[i], -1);
-            if (r != XGL_SUCCESS)
+            const VK_RESULT r = intel_queue_wait(dev->queues[i], -1);
+            if (r != VK_SUCCESS)
                 ret = r;
         }
     }
@@ -376,9 +376,9 @@
     return ret;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetValidationLevel(
-    XGL_DEVICE                                  device,
-    XGL_VALIDATION_LEVEL                        validationLevel)
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetValidationLevel(
+    VK_DEVICE                                   device,
+    VK_VALIDATION_LEVEL                         validationLevel)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
@@ -386,55 +386,55 @@
     if (dbg)
         dbg->validation_level = validationLevel;
 
-    return XGL_SUCCESS;
+    return VK_SUCCESS;
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetMessageFilter(
-    XGL_DEVICE                                  device,
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetMessageFilter(
+    VK_DEVICE                                   device,
     int32_t                                     msgCode,
-    XGL_DBG_MSG_FILTER                          filter)
+    VK_DBG_MSG_FILTER                           filter)
 {
     struct intel_dev *dev = intel_dev(device);
 
     if (!dev->base.dbg)
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
 
-    if (filter == XGL_DBG_MSG_FILTER_NONE) {
+    if (filter == VK_DBG_MSG_FILTER_NONE) {
         intel_dev_remove_msg_filter(dev, msgCode);
-        return XGL_SUCCESS;
+        return VK_SUCCESS;
     }
 
     return intel_dev_add_msg_filter(dev, msgCode, filter);
 }
 
-ICD_EXPORT XGL_RESULT XGLAPI xglDbgSetDeviceOption(
-    XGL_DEVICE                                  device,
-    XGL_DBG_DEVICE_OPTION                       dbgOption,
+ICD_EXPORT VK_RESULT VKAPI vkDbgSetDeviceOption(
+    VK_DEVICE                                   device,
+    VK_DBG_DEVICE_OPTION                        dbgOption,
     size_t                                      dataSize,
     const void*                                 pData)
 {
     struct intel_dev *dev = intel_dev(device);
     struct intel_dev_dbg *dbg = intel_dev_dbg(dev);
-    XGL_RESULT ret = XGL_SUCCESS;
+    VK_RESULT ret = VK_SUCCESS;
 
     if (dataSize == 0)
-        return XGL_ERROR_INVALID_VALUE;
+        return VK_ERROR_INVALID_VALUE;
 
     switch (dbgOption) {
-    case XGL_DBG_OPTION_DISABLE_PIPELINE_LOADS:
+    case VK_DBG_OPTION_DISABLE_PIPELINE_LOADS:
         if (dbg)
             dbg->disable_pipeline_loads = *((const bool *) pData);
         break;
-    case XGL_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS:
+    case VK_DBG_OPTION_FORCE_OBJECT_MEMORY_REQS:
         if (dbg)
             dbg->force_object_memory_reqs = *((const bool *) pData);
         break;
-    case XGL_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT:
+    case VK_DBG_OPTION_FORCE_LARGE_IMAGE_ALIGNMENT:
         if (dbg)
             dbg->force_large_image_alignment = *((const bool *) pData);
         break;
     default:
-        ret = XGL_ERROR_INVALID_VALUE;
+        ret = VK_ERROR_INVALID_VALUE;
         break;
     }