intel: clean up queue init

Rename queue_set_state() to queue_select_pipeline(), and split the queue
initialization code out into queue_init_hw_and_atomic_bo().
diff --git a/icd/intel/queue.c b/icd/intel/queue.c
index dff89c8..e80aa37 100644
--- a/icd/intel/queue.c
+++ b/icd/intel/queue.c
@@ -29,13 +29,6 @@
 #include "fence.h"
 #include "queue.h"
 
-/* must match intel_cmd::pipeline_select */
-enum queue_state {
-    QUEUE_STATE_GRAPHICS_SELECTED = GEN6_PIPELINE_SELECT_DW0_SELECT_3D,
-    QUEUE_STATE_COMPUTE_SELECTED = GEN6_PIPELINE_SELECT_DW0_SELECT_MEDIA,
-    QUEUE_STATE_INITIALIZED = -1,
-};
-
 static XGL_RESULT queue_submit_bo(struct intel_queue *queue,
                                   struct intel_bo *bo,
                                   XGL_GPU_SIZE used)
@@ -54,10 +47,89 @@
     return (err) ? XGL_ERROR_UNKNOWN : XGL_SUCCESS;
 }
 
-static XGL_RESULT queue_set_state(struct intel_queue *queue,
-                                  enum queue_state state)
+static struct intel_bo *queue_create_bo(struct intel_queue *queue,
+                                        XGL_GPU_SIZE size,
+                                        const void *cmd,
+                                        XGL_SIZE cmd_len)
 {
-    static const uint32_t queue_state_init[] = {
+    struct intel_bo *bo;
+    void *ptr;
+
+    bo = intel_winsys_alloc_buffer(queue->dev->winsys,
+            "queue bo", size, INTEL_DOMAIN_CPU);
+    if (!bo)
+        return NULL;
+
+    if (!cmd_len)
+        return bo;
+
+    ptr = intel_bo_map(bo, true);
+    if (!ptr) {
+        intel_bo_unreference(bo);
+        return NULL;
+    }
+
+    memcpy(ptr, cmd, cmd_len);
+    intel_bo_unmap(bo);
+
+    return bo;
+}
+
+static XGL_RESULT queue_select_pipeline(struct intel_queue *queue,
+                                        int pipeline_select)
+{
+    uint32_t pipeline_select_cmd[] = {
+        GEN_RENDER_CMD(SINGLE_DW, GEN6, PIPELINE_SELECT),
+        GEN_MI_CMD(MI_BATCH_BUFFER_END),
+    };
+    struct intel_bo *bo;
+    XGL_RESULT ret;
+
+    if (queue->ring != INTEL_RING_RENDER ||
+        queue->last_pipeline_select == pipeline_select)
+        return XGL_SUCCESS;
+
+    switch (pipeline_select) {
+    case GEN6_PIPELINE_SELECT_DW0_SELECT_3D:
+        bo = queue->select_graphics_bo;
+        break;
+    case GEN6_PIPELINE_SELECT_DW0_SELECT_MEDIA:
+        bo = queue->select_compute_bo;
+        break;
+    default:
+        return XGL_ERROR_INVALID_VALUE;
+        break;
+    }
+
+    if (!bo) {
+        pipeline_select_cmd[0] |= pipeline_select;
+        bo = queue_create_bo(queue, sizeof(pipeline_select_cmd),
+                pipeline_select_cmd, sizeof(pipeline_select_cmd));
+        if (!bo)
+            return XGL_ERROR_OUT_OF_GPU_MEMORY;
+
+        switch (pipeline_select) {
+        case GEN6_PIPELINE_SELECT_DW0_SELECT_3D:
+            queue->select_graphics_bo = bo;
+            break;
+        case GEN6_PIPELINE_SELECT_DW0_SELECT_MEDIA:
+            queue->select_compute_bo = bo;
+            break;
+        default:
+            break;
+        }
+    }
+
+    ret = queue_submit_bo(queue, bo, sizeof(pipeline_select_cmd));
+    if (ret == XGL_SUCCESS)
+        queue->last_pipeline_select = pipeline_select;
+
+    return ret;
+}
+
+static XGL_RESULT queue_init_hw_and_atomic_bo(struct intel_queue *queue)
+{
+    const uint32_t ctx_init_cmd[] = {
         /* STATE_SIP */
         GEN_RENDER_CMD(COMMON, GEN6, STATE_SIP),
         0,
@@ -70,102 +142,34 @@
         GEN_MI_CMD(MI_BATCH_BUFFER_END),
         GEN_MI_CMD(MI_NOOP),
     };
-    static const uint32_t queue_state_select_graphics[] = {
-        /* PIPELINE_SELECT */
-        GEN_RENDER_CMD(SINGLE_DW, GEN6, PIPELINE_SELECT) |
-            GEN6_PIPELINE_SELECT_DW0_SELECT_3D,
-        /* end */
-        GEN_MI_CMD(MI_BATCH_BUFFER_END),
-    };
-    static const uint32_t queue_state_select_compute[] = {
-        /* PIPELINE_SELECT */
-        GEN_RENDER_CMD(SINGLE_DW, GEN6, PIPELINE_SELECT) |
-            GEN6_PIPELINE_SELECT_DW0_SELECT_MEDIA,
-        /* end */
-        GEN_MI_CMD(MI_BATCH_BUFFER_END),
-    };
     struct intel_bo *bo;
-    XGL_GPU_SIZE size;
     XGL_RESULT ret;
 
-    if (queue->last_pipeline_select == state)
-        return XGL_SUCCESS;
-
-    switch (state) {
-    case QUEUE_STATE_GRAPHICS_SELECTED:
-        bo = queue->select_graphics_bo;
-        size = sizeof(queue_state_select_graphics);
-        break;
-    case QUEUE_STATE_COMPUTE_SELECTED:
-        bo = queue->select_compute_bo;
-        size = sizeof(queue_state_select_compute);
-        break;
-    case QUEUE_STATE_INITIALIZED:
-        /* will be reused for the atomic counters */
-        assert(!queue->atomic_bo);
-        bo = NULL;
-        size = sizeof(queue_state_init);
-        break;
-    default:
-        return XGL_ERROR_INVALID_VALUE;
-        break;
+    if (queue->ring != INTEL_RING_RENDER) {
+        queue->last_pipeline_select = -1;
+        queue->atomic_bo = queue_create_bo(queue,
+                sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
+                NULL, 0);
+        return (queue->atomic_bo) ? XGL_SUCCESS : XGL_ERROR_OUT_OF_GPU_MEMORY;
     }
 
-    if (!bo) {
-        const void *cmd;
-        void *ptr;
+    bo = queue_create_bo(queue,
+            sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
+            ctx_init_cmd, sizeof(ctx_init_cmd));
+    if (!bo)
+        return XGL_ERROR_OUT_OF_GPU_MEMORY;
 
-        bo = intel_winsys_alloc_buffer(queue->dev->winsys,
-                "queue bo", 4096, INTEL_DOMAIN_CPU);
-        if (!bo)
-            return XGL_ERROR_OUT_OF_GPU_MEMORY;
-
-        /* do the allocation only */
-        if (queue->ring != INTEL_RING_RENDER) {
-            assert(state == QUEUE_STATE_INITIALIZED);
-            queue->atomic_bo = bo;
-            queue->last_pipeline_select = QUEUE_STATE_INITIALIZED;
-            return XGL_SUCCESS;
-        }
-
-        ptr = intel_bo_map(bo, true);
-        if (!ptr) {
-            intel_bo_unreference(bo);
-            return XGL_ERROR_MEMORY_MAP_FAILED;
-        }
-
-        switch (state) {
-        case QUEUE_STATE_GRAPHICS_SELECTED:
-            queue->select_graphics_bo = bo;
-            cmd = queue_state_select_graphics;
-            break;
-        case QUEUE_STATE_COMPUTE_SELECTED:
-            queue->select_compute_bo = bo;
-            cmd = queue_state_select_compute;
-            break;
-        case QUEUE_STATE_INITIALIZED:
-            /* reused for the atomic counters */
-            queue->atomic_bo = bo;
-            cmd = queue_state_init;
-            break;
-        default:
-            break;
-        }
-
-        memcpy(ptr, cmd, size);
-        intel_bo_unmap(bo);
+    ret = queue_submit_bo(queue, bo, sizeof(ctx_init_cmd));
+    if (ret != XGL_SUCCESS) {
+        intel_bo_unreference(bo);
+        return ret;
     }
 
-    assert(queue->ring == INTEL_RING_RENDER);
+    queue->last_pipeline_select = GEN6_PIPELINE_SELECT_DW0_SELECT_3D;
+    /* reuse */
+    queue->atomic_bo = bo;
 
-    ret = queue_submit_bo(queue, bo, size);
-    if (ret == XGL_SUCCESS) {
-        if (state == QUEUE_STATE_INITIALIZED)
-            state = QUEUE_STATE_GRAPHICS_SELECTED;
-        queue->last_pipeline_select = state;
-    }
-
-    return ret;
+    return XGL_SUCCESS;
 }
 
 XGL_RESULT intel_queue_create(struct intel_dev *dev,
@@ -192,7 +196,7 @@
     queue->dev = dev;
     queue->ring = ring;
 
-    if (queue_set_state(queue, QUEUE_STATE_INITIALIZED) != XGL_SUCCESS) {
+    if (queue_init_hw_and_atomic_bo(queue) != XGL_SUCCESS) {
         intel_queue_destroy(queue);
         return XGL_ERROR_INITIALIZATION_FAILED;
     }
@@ -260,7 +264,7 @@
         XGL_GPU_SIZE used;
         XGL_RESULT ret;
 
-        ret = queue_set_state(queue, cmd->pipeline_select);
+        ret = queue_select_pipeline(queue, cmd->pipeline_select);
         if (ret != XGL_SUCCESS)
             break;