Allan MacKinnon | 4359d52 | 2018-06-19 13:57:04 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2017 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can |
| 5 | * be found in the LICENSE file. |
| 6 | * |
| 7 | */ |
| 8 | |
| 9 | #pragma once |
| 10 | |
| 11 | // |
| 12 | // |
| 13 | // |
| 14 | |
Allan MacKinnon | c110e79 | 2018-06-21 09:09:56 -0700 | [diff] [blame^] | 15 | #include "skc.h" |
Allan MacKinnon | 4359d52 | 2018-06-19 13:57:04 -0700 | [diff] [blame] | 16 | #include "runtime.h" |
Allan MacKinnon | 4359d52 | 2018-06-19 13:57:04 -0700 | [diff] [blame] | 17 | #include "cq_pool_cl.h" |
| 18 | #include "handle_pool_cl_12.h" |
| 19 | #include "block_pool_cl_12.h" |
| 20 | #include "allocator_device_cl.h" |
| 21 | |
| 22 | // |
| 23 | // FIXME -- two parts: |
| 24 | // |
| 25 | // 1. directly access the structures in the runtime sub-struct implementations |
| 26 | // 2. possibly wall off the non-platform-specific structs into a sub structure |
| 27 | // |
| 28 | |
//
// Backend state bundle for an SKC context on the CL 1.2 runtime:
// the OpenCL context/device pair, host and device allocators, pools
// of command queues / blocks / handles, and scheduling state.
//
struct skc_runtime
{
  //
  // state visible to device
  //
  struct {
    cl_context   context;     // OpenCL context this runtime operates in
    cl_device_id device_id;   // OpenCL device backing this runtime
    cl_uint      align_bytes; // alignment granularity in bytes -- presumably the device's base address alignment; confirm where it is initialized
  } cl;

  struct {
    struct skc_allocator_host   host;   // host-memory allocator
    struct skc_allocator_device device; // device-memory allocator (see perm/temp alloc functions below)
  } allocator;

  // pool of in-order command queues (only in-order queues are used -- see acquire/release below)
  struct skc_cq_pool     cq_pool;

  // pool of device memory blocks
  struct skc_block_pool  block_pool;

  // pool of path/raster handles shared by the host and device retain/release functions below
  struct skc_handle_pool handle_pool;

  //
  // state that is slightly opaque (for now)
  //
  struct skc_scheduler    * scheduler; // opaque task scheduler

  struct skc_grid_deps    * deps;      // opaque grid dependency tracker

  struct skc_config const * config;    // FIXME: config will be determined by device with some opportunities to resize

  struct skc_device       * device;    // opaque bundle of kernels
};
| 62 | |
| 63 | // |
| 64 | // Creation and disposal intitializes context and may rely on other |
| 65 | // context resources like the scheduler |
| 66 | // |
| 67 | |
//
// Create the CL 1.2 runtime for `context`, binding it to an existing
// OpenCL context and device supplied by the caller.
//
// context      : SKC context to attach the runtime to.
// context_cl   : caller-provided OpenCL context (ownership/retention
//                semantics not visible here -- confirm in implementation).
// device_id_cl : OpenCL device the runtime will target.
//
// Returns an skc_err status code.
//
skc_err
skc_runtime_cl_12_create(struct skc_context * const context,
                         cl_context                 context_cl,
                         cl_device_id               device_id_cl);

//
// Dispose of the runtime previously created on `context`.
// Returns an skc_err status code.
//
skc_err
skc_runtime_cl_12_dispose(struct skc_context * const context);
| 75 | |
| 76 | // |
| 77 | // HOST HANDLE RETAIN/RELEASE/FLUSH |
| 78 | // |
| 79 | |
//
// Retain host-side references on `count` path handles.
// Returns an skc_err status code (e.g. on invalid handles -- confirm
// exact failure modes in implementation).
//
skc_err
skc_runtime_path_host_retain(struct skc_runtime * const runtime,
                             skc_path_t const *         paths,
                             uint32_t                   count);

//
// Retain host-side references on `count` raster handles.
//
skc_err
skc_runtime_raster_host_retain(struct skc_runtime * const runtime,
                               skc_raster_t const *       rasters,
                               uint32_t                   count);


//
// Release host-side references on `count` path handles.
//
skc_err
skc_runtime_path_host_release(struct skc_runtime * const runtime,
                              skc_path_t const *         paths,
                              uint32_t                   count);

//
// Release host-side references on `count` raster handles.
//
skc_err
skc_runtime_raster_host_release(struct skc_runtime * const runtime,
                                skc_raster_t const *       rasters,
                                uint32_t                   count);


//
// Flush `count` path handles -- presumably forces any pending work
// associated with these handles to be submitted; confirm against
// implementation.
//
skc_err
skc_runtime_path_host_flush(struct skc_runtime * const runtime,
                            skc_path_t const *         paths,
                            uint32_t                   count);

//
// Flush `count` raster handles (see note on path flush above regarding
// exact semantics).
//
skc_err
skc_runtime_raster_host_flush(struct skc_runtime * const runtime,
                              skc_raster_t const *       rasters,
                              uint32_t                   count);
| 111 | |
| 112 | // |
| 113 | // DEVICE/PIPELINE HANDLE ACQUIRE/RETAIN/RELEASE |
| 114 | // |
| 115 | // The retain operations pre-validate handles |
| 116 | // |
| 117 | |
//
// Acquire a fresh handle from the runtime's handle pool for use by the
// device/pipeline.
//
skc_handle_t
skc_runtime_handle_device_acquire(struct skc_runtime * const runtime);

//
// Pre-validate `count` typed handles against `handle_type` and, if
// valid, retain device-side references on them.  Returns an skc_err
// status code on validation failure.
//
skc_err
skc_runtime_handle_device_validate_retain(struct skc_runtime * const       runtime,
                                          skc_typed_handle_type_e const    handle_type,
                                          skc_typed_handle_t const *       typed_handles,
                                          uint32_t                         count);

//
// Retain device-side references on `count` handles.  No validation is
// performed here (validation happens in the validate_retain entry
// point above).
//
void
skc_runtime_handle_device_retain(struct skc_runtime * const runtime,
                                 skc_handle_t const *       handles,
                                 uint32_t                   count);

//
// Release device-side references on `count` path handles.
//
void
skc_runtime_path_device_release(struct skc_runtime * const runtime,
                                skc_handle_t const *       handles,
                                uint32_t                   count);

//
// Release device-side references on `count` raster handles.
//
void
skc_runtime_raster_device_release(struct skc_runtime * const runtime,
                                  skc_handle_t const *       handles,
                                  uint32_t                   count);
| 141 | |
| 142 | // |
| 143 | // We only use in-order command queues in the pipeline |
| 144 | // |
| 145 | |
//
// Acquire an in-order command queue from the runtime's cq_pool.
// (The pipeline uses only in-order queues.)
//
cl_command_queue
skc_runtime_acquire_cq_in_order(struct skc_runtime * const runtime);

//
// Return a previously acquired in-order command queue to the pool.
//
void
skc_runtime_release_cq_in_order(struct skc_runtime * const runtime,
                                cl_command_queue           cq);
| 152 | |
| 153 | // |
| 154 | // DEVICE MEMORY ALLOCATION |
| 155 | // |
| 156 | |
//
// Allocate a "permanent" (long-lived) device buffer of `size` bytes
// with the given cl_mem_flags.  Pair with skc_runtime_device_perm_free().
//
cl_mem
skc_runtime_device_perm_alloc(struct skc_runtime * const runtime,
                              cl_mem_flags const         flags,
                              size_t const               size);

//
// Free a buffer obtained from skc_runtime_device_perm_alloc().
//
void
skc_runtime_device_perm_free(struct skc_runtime * const runtime,
                             cl_mem const               mem);

//
// Allocate a temporary device buffer of `size` bytes.  The returned
// subbuf_id must be passed back to skc_runtime_device_temp_free();
// subbuf_size receives the actual size granted -- presumably this is a
// suballocation from a larger arena, which would explain the subbuffer
// id/size out-parameters; confirm in allocator_device_cl.
//
// subbuf_id   : out -- identifier of the suballocation, needed to free.
// subbuf_size : out -- actual size of the suballocation.
//
cl_mem
skc_runtime_device_temp_alloc(struct skc_runtime * const runtime,
                              cl_mem_flags const         flags,
                              size_t const               size,
                              skc_subbuf_id_t    * const subbuf_id,
                              size_t             * const subbuf_size);

//
// Free a temporary buffer obtained from skc_runtime_device_temp_alloc(),
// identified by both its cl_mem and its subbuf_id.
//
void
skc_runtime_device_temp_free(struct skc_runtime * const runtime,
                             cl_mem const               mem,
                             skc_subbuf_id_t const      subbuf_id);
| 177 | |
| 178 | // |
| 179 | // |
| 180 | // |