Chia-I Wu | 82bff27 | 2014-12-27 14:12:52 +0800 | [diff] [blame] | 1 | // XGL tests |
| 2 | // |
| 3 | // Copyright (C) 2014 LunarG, Inc. |
| 4 | // |
| 5 | // Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | // copy of this software and associated documentation files (the "Software"), |
| 7 | // to deal in the Software without restriction, including without limitation |
| 8 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 9 | // and/or sell copies of the Software, and to permit persons to whom the |
| 10 | // Software is furnished to do so, subject to the following conditions: |
| 11 | // |
| 12 | // The above copyright notice and this permission notice shall be included |
| 13 | // in all copies or substantial portions of the Software. |
| 14 | // |
| 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 18 | // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 20 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 21 | // DEALINGS IN THE SOFTWARE. |
| 22 | |
| 23 | #ifndef XGLTESTBINDING_H |
| 24 | #define XGLTESTBINDING_H |
| 25 | |
| 26 | #include <vector> |
| 27 | |
| 28 | #define XGL_PROTOTYPES |
| 29 | #include "xgl.h" |
| 30 | |
| 31 | namespace xgl_testing { |
| 32 | |
| 33 | typedef void (*ErrorCallback)(const char *expr, const char *file, unsigned int line, const char *function); |
| 34 | void set_error_callback(ErrorCallback callback); |
| 35 | |
| 36 | class PhysicalGpu; |
| 37 | class BaseObject; |
| 38 | class Object; |
| 39 | class DynamicStateObject; |
| 40 | class Device; |
| 41 | class Queue; |
| 42 | class GpuMemory; |
| 43 | class Fence; |
| 44 | class QueueSemaphore; |
| 45 | class Event; |
| 46 | class QueryPool; |
| 47 | class Image; |
| 48 | class ImageView; |
| 49 | class ColorAttachmentView; |
| 50 | class DepthStencilView; |
| 51 | class Shader; |
| 52 | class Pipeline; |
| 53 | class PipelineDelta; |
| 54 | class Sampler; |
| 55 | class DescriptorSet; |
| 56 | class DynamicVpStateObject; |
| 57 | class DynamicRsStateObject; |
| 58 | class DynamicMsaaStateObject; |
| 59 | class DynamicCbStateObject; |
| 60 | class DynamicDsStateObject; |
| 61 | class CmdBuffer; |
| 62 | |
// Thin, non-owning wrapper around an XGL_PHYSICAL_GPU handle exposing the
// GPU query entry points (properties, extensions, layers, compatibility).
class PhysicalGpu {
public:
    explicit PhysicalGpu(XGL_PHYSICAL_GPU gpu) : gpu_(gpu) {}

    // Underlying handle; this wrapper never destroys it.
    const XGL_PHYSICAL_GPU &obj() const { return gpu_; }

    // xglGetGpuInfo()
    XGL_PHYSICAL_GPU_PROPERTIES properties() const;
    XGL_PHYSICAL_GPU_PERFORMANCE performance() const;
    XGL_PHYSICAL_GPU_MEMORY_PROPERTIES memory_properties() const;
    std::vector<XGL_PHYSICAL_GPU_QUEUE_PROPERTIES> queue_properties() const;

    // xglGetProcAddr()
    void *get_proc(const char *name) const { return xglGetProcAddr(gpu_, name); }

    // xglGetExtensionSupport()
    bool has_extension(const char *ext) const { return (xglGetExtensionSupport(gpu_, ext) == XGL_SUCCESS); }
    std::vector<const char *> extensions() const;

    // xglEnumerateLayers()
    // buf supplies backing storage for the returned layer-name pointers,
    // so it must outlive the returned vector.
    std::vector<const char *> layers(std::vector<char> &buf) const;

    // xglGetMultiGpuCompatibility()
    XGL_GPU_COMPATIBILITY_INFO compatibility(const PhysicalGpu &other) const;

private:
    XGL_PHYSICAL_GPU gpu_;
};
| 91 | |
// Root of the object wrapper hierarchy.  Holds an XGL_BASE_OBJECT handle,
// tracks whether the wrapper owns (and thus should destroy) the handle,
// and exposes xglGetObjectInfo() queries.  Non-copyable.
class BaseObject {
public:
    const XGL_BASE_OBJECT &obj() const { return obj_; }
    // True once init()/reinit() has stored a non-null handle.
    bool initialized() const { return (obj_ != XGL_NULL_HANDLE); }

    // xglGetObjectInfo()
    uint32_t memory_allocation_count() const;
    std::vector<XGL_MEMORY_REQUIREMENTS> memory_requirements() const;

protected:
    explicit BaseObject() : obj_(XGL_NULL_HANDLE), own_obj_(false) {}
    explicit BaseObject(XGL_BASE_OBJECT obj) : obj_(XGL_NULL_HANDLE), own_obj_(false) { init(obj); }

    // Store the handle; "own" controls whether cleanup destroys it.
    void init(XGL_BASE_OBJECT obj, bool own);
    void init(XGL_BASE_OBJECT obj) { init(obj, true); }

    // Replace the current handle with a new one.
    void reinit(XGL_BASE_OBJECT obj, bool own);
    void reinit(XGL_BASE_OBJECT obj) { reinit(obj, true); }

    bool own() const { return own_obj_; };

private:
    // base objects are non-copyable
    BaseObject(const BaseObject &);
    BaseObject &operator=(const BaseObject &);

    XGL_BASE_OBJECT obj_;
    bool own_obj_;
};
| 121 | |
| 122 | class Object : public BaseObject { |
| 123 | public: |
| 124 | const XGL_OBJECT &obj() const { return reinterpret_cast<const XGL_OBJECT &>(BaseObject::obj()); } |
| 125 | |
| 126 | // xglBindObjectMemory() |
| 127 | void bind_memory(uint32_t alloc_idx, const GpuMemory &mem, XGL_GPU_SIZE mem_offset); |
| 128 | void unbind_memory(uint32_t alloc_idx); |
| 129 | void unbind_memory(); |
| 130 | |
| 131 | // Unless an object is initialized with init_no_mem(), memories are |
| 132 | // automatically allocated and bound. These methods can be used to get |
| 133 | // the memories (for XGL_MEMORY_REFs), or to map/unmap the primary memory. |
| 134 | std::vector<XGL_GPU_MEMORY> memories() const; |
| 135 | |
| 136 | const void *map(XGL_FLAGS flags) const; |
| 137 | void *map(XGL_FLAGS flags); |
| 138 | const void *map() const { return map(0); } |
| 139 | void *map() { return map(0); } |
| 140 | |
| 141 | void unmap() const; |
| 142 | |
| 143 | protected: |
| 144 | explicit Object() : mem_alloc_count_(0), internal_mems_(NULL), primary_mem_(NULL) {} |
| 145 | explicit Object(XGL_OBJECT obj) : mem_alloc_count_(0), internal_mems_(NULL), primary_mem_(NULL) { init(obj); } |
| 146 | ~Object() { cleanup(); } |
| 147 | |
| 148 | void init(XGL_OBJECT obj, bool own); |
| 149 | void init(XGL_OBJECT obj) { init(obj, true); } |
| 150 | |
| 151 | void reinit(XGL_OBJECT obj, bool own); |
| 152 | void reinit(XGL_OBJECT obj) { init(obj, true); } |
| 153 | |
| 154 | // allocate and bind internal memories |
| 155 | void alloc_memory(const Device &dev, bool for_linear_img); |
| 156 | void alloc_memory(const Device &dev) { alloc_memory(dev, false); } |
| 157 | void alloc_memory(const std::vector<XGL_GPU_MEMORY> &mems); |
| 158 | |
| 159 | private: |
| 160 | void cleanup(); |
| 161 | |
| 162 | uint32_t mem_alloc_count_; |
| 163 | GpuMemory *internal_mems_; |
| 164 | GpuMemory *primary_mem_; |
| 165 | }; |
| 166 | |
// Common base for the xglCreate*State() dynamic-state objects; narrows the
// handle type to XGL_STATE_OBJECT.
class DynamicStateObject : public Object {
public:
    const XGL_STATE_OBJECT &obj() const { return reinterpret_cast<const XGL_STATE_OBJECT &>(Object::obj()); }

protected:
    explicit DynamicStateObject() {}
    explicit DynamicStateObject(XGL_STATE_OBJECT obj) : Object(obj) {}
};
| 175 | |
// CRTP-less helper: derives from base wrapper C while presenting the
// concrete handle type T via obj().  T must be layout-compatible with C's
// handle type (the reinterpret_cast relies on it).
template<typename T, class C>
class DerivedObject : public C {
public:
    const T &obj() const { return reinterpret_cast<const T &>(C::obj()); }

protected:
    typedef T obj_type;
    typedef C base_type;

    explicit DerivedObject() {}
    explicit DerivedObject(T obj) : C(obj) {}
};
| 188 | |
// Wrapper for an XGL_DEVICE.  Owns per-type queue lists and caches heap and
// format properties gathered at init() time.
class Device : public DerivedObject<XGL_DEVICE, BaseObject> {
public:
    explicit Device(XGL_PHYSICAL_GPU gpu) : gpu_(gpu) {}
    ~Device();

    // xglCreateDevice()
    void init(const XGL_DEVICE_CREATE_INFO &info);
    void init(); // all queues, all layers, all extensions, etc

    const PhysicalGpu &gpu() const { return gpu_; }

    // xglGetDeviceQueue()
    // NOTE(review): returned Queue pointers appear to be owned by the
    // Device (freed in ~Device) — confirm against the .cpp.
    const std::vector<Queue *> &graphics_queues() { return queues_[GRAPHICS]; }
    const std::vector<Queue *> &compute_queues() { return queues_[COMPUTE]; }
    const std::vector<Queue *> &dma_queues() { return queues_[DMA]; }

    const std::vector<XGL_MEMORY_HEAP_PROPERTIES> &heap_properties() const { return heap_props_; }

    // A (format, tiling) pair together with its supported feature flags.
    struct Format {
        XGL_FORMAT format;
        XGL_IMAGE_TILING tiling;
        XGL_FLAGS features;
    };
    // xglGetFormatInfo()
    XGL_FORMAT_PROPERTIES format_properties(XGL_FORMAT format);
    const std::vector<Format> &formats() const { return formats_; }

    // xglDeviceWaitIdle()
    void wait();

    // xglWaitForFences()
    XGL_RESULT wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout);
    // Wait forever for a single fence ((uint64_t) -1 == no timeout).
    XGL_RESULT wait(const Fence &fence) { return wait(std::vector<const Fence *>(1, &fence), true, (uint64_t) -1); }

private:
    // Index into queues_ by queue type.
    enum QueueIndex {
        GRAPHICS,
        COMPUTE,
        DMA,
        QUEUE_COUNT,
    };

    void init_queues();
    void init_heap_props();
    void init_formats();

    PhysicalGpu gpu_;

    std::vector<Queue *> queues_[QUEUE_COUNT];
    std::vector<XGL_MEMORY_HEAP_PROPERTIES> heap_props_;
    std::vector<Format> formats_;
};
| 241 | |
// Wrapper for an XGL_QUEUE.  Constructed from an existing handle (queues
// are obtained from the device, not created).
class Queue : public DerivedObject<XGL_QUEUE, BaseObject> {
public:
    explicit Queue(XGL_QUEUE queue) : DerivedObject(queue) {}

    // xglQueueSubmit()
    void submit(const std::vector<const CmdBuffer *> &cmds, const std::vector<XGL_MEMORY_REF> &mem_refs, Fence &fence);
    void submit(const CmdBuffer &cmd, const std::vector<XGL_MEMORY_REF> &mem_refs, Fence &fence);
    void submit(const CmdBuffer &cmd, const std::vector<XGL_MEMORY_REF> &mem_refs);

    // xglQueueSetGlobalMemReferences()
    void set_global_mem_references(const std::vector<XGL_MEMORY_REF> &mem_refs);

    // xglQueueWaitIdle()
    void wait();

    // xglSignalQueueSemaphore()
    // xglWaitQueueSemaphore()
    void signal_semaphore(QueueSemaphore &sem);
    void wait_semaphore(QueueSemaphore &sem);
};
| 262 | |
// Wrapper for an XGL_GPU_MEMORY allocation.  The various init() overloads
// cover the different ways memory can come into existence (alloc, pinned,
// shared, peer), plus wrapping an externally owned handle.
class GpuMemory : public DerivedObject<XGL_GPU_MEMORY, BaseObject> {
public:
    explicit GpuMemory() {}
    explicit GpuMemory(const Device &dev, XGL_GPU_SIZE size) { init(dev, size); }
    ~GpuMemory();

    // xglAllocMemory()
    void init(const Device &dev, const XGL_MEMORY_ALLOC_INFO &info);
    void init(const Device &dev, XGL_GPU_SIZE size);
    // xglPinSystemMemory()
    void init(const Device &dev, size_t size, const void *data);
    // xglOpenSharedMemory()
    void init(const Device &dev, const XGL_MEMORY_OPEN_INFO &info);
    // xglOpenPeerMemory()
    void init(const Device &dev, const XGL_PEER_MEMORY_OPEN_INFO &info);

    // Wrap an existing handle without taking ownership (own == false).
    void init(XGL_GPU_MEMORY mem) { BaseObject::init(mem, false); }

    // xglSetMemoryPriority()
    void set_priority(XGL_MEMORY_PRIORITY priority);

    // xglMapMemory()
    const void *map(XGL_FLAGS flags) const;
    void *map(XGL_FLAGS flags);
    const void *map() const { return map(0); }
    void *map() { return map(0); }

    // xglUnmapMemory()
    void unmap() const;

    // Build a state-transition descriptor for the given sub-range of this
    // memory object.
    XGL_MEMORY_STATE_TRANSITION state_transition(XGL_MEMORY_STATE old_state, XGL_MEMORY_STATE new_state,
                                                 XGL_GPU_SIZE offset, XGL_GPU_SIZE size) const
    {
        XGL_MEMORY_STATE_TRANSITION transition = {};
        transition.sType = XGL_STRUCTURE_TYPE_MEMORY_STATE_TRANSITION;
        transition.mem = obj();
        transition.oldState = old_state;
        transition.newState = new_state;
        transition.offset = offset;
        transition.regionSize = size;
        return transition;
    }

    // Derive an alloc info (normal priority) from memory requirements.
    static XGL_MEMORY_ALLOC_INFO alloc_info(const XGL_MEMORY_REQUIREMENTS &reqs);
};
| 308 | |
// Wrapper for an XGL_FENCE (CPU-visible GPU completion signal).
class Fence : public DerivedObject<XGL_FENCE, Object> {
public:
    // xglCreateFence()
    void init(const Device &dev, const XGL_FENCE_CREATE_INFO &info);

    // xglGetFenceStatus()
    XGL_RESULT status() const { return xglGetFenceStatus(obj()); }

    static XGL_FENCE_CREATE_INFO create_info(XGL_FLAGS flags);
};
| 319 | |
// Wrapper for an XGL_QUEUE_SEMAPHORE (cross-queue synchronization).
class QueueSemaphore : public DerivedObject<XGL_QUEUE_SEMAPHORE, Object> {
public:
    // xglCreateQueueSemaphore()
    void init(const Device &dev, const XGL_QUEUE_SEMAPHORE_CREATE_INFO &info);
    // xglOpenSharedQueueSemaphore()
    void init(const Device &dev, const XGL_QUEUE_SEMAPHORE_OPEN_INFO &info);

    static XGL_QUEUE_SEMAPHORE_CREATE_INFO create_info(uint32_t init_count, XGL_FLAGS flags);
};
| 329 | |
// Wrapper for an XGL_EVENT (host-settable GPU event).
class Event : public DerivedObject<XGL_EVENT, Object> {
public:
    // xglCreateEvent()
    void init(const Device &dev, const XGL_EVENT_CREATE_INFO &info);

    // xglGetEventStatus()
    // xglSetEvent()
    // xglResetEvent()
    XGL_RESULT status() const { return xglGetEventStatus(obj()); }
    void set();
    void reset();

    static XGL_EVENT_CREATE_INFO create_info(XGL_FLAGS flags);
};
| 344 | |
// Wrapper for an XGL_QUERY_POOL (occlusion/pipeline-statistics queries).
class QueryPool : public DerivedObject<XGL_QUERY_POOL, Object> {
public:
    // xglCreateQueryPool()
    void init(const Device &dev, const XGL_QUERY_POOL_CREATE_INFO &info);

    // xglGetQueryPoolResults()
    // Reads results for slots [start, start + count) into data.
    XGL_RESULT results(uint32_t start, uint32_t count, size_t size, void *data);

    static XGL_QUERY_POOL_CREATE_INFO create_info(XGL_QUERY_TYPE type, uint32_t slot_count);
};
| 355 | |
// Wrapper for an XGL_IMAGE plus helpers for building subresource and
// extent descriptors.  Caches the create info and the format's feature
// flags at init() time.
class Image : public DerivedObject<XGL_IMAGE, Object> {
public:
    explicit Image() : format_features_(0) {}
    explicit Image(const Device &dev, const XGL_IMAGE_CREATE_INFO &info) : format_features_(0) { init(dev, info); }

    // xglCreateImage()
    void init(const Device &dev, const XGL_IMAGE_CREATE_INFO &info);
    // Create without allocating/binding backing memory.
    void init_no_mem(const Device &dev, const XGL_IMAGE_CREATE_INFO &info);
    // xglOpenPeerImage()
    void init(const Device &dev, const XGL_PEER_IMAGE_OPEN_INFO &info, const XGL_IMAGE_CREATE_INFO &original_info);

    // xglGetImageSubresourceInfo()
    XGL_SUBRESOURCE_LAYOUT subresource_layout(const XGL_IMAGE_SUBRESOURCE &subres) const;

    bool transparent() const;
    bool copyable() const { return (format_features_ & XGL_FORMAT_IMAGE_COPY_BIT); }

    // Full subresource range of this image for the given aspect.
    XGL_IMAGE_SUBRESOURCE_RANGE subresource_range(XGL_IMAGE_ASPECT aspect) const { return subresource_range(create_info_, aspect); }
    XGL_EXTENT3D extent() const { return create_info_.extent; }
    XGL_EXTENT3D extent(uint32_t mip_level) const { return extent(create_info_.extent, mip_level); }

    // Build a state-transition descriptor for the given subresource range.
    // NOTE(review): unlike GpuMemory::state_transition(), no sType is set
    // here — confirm whether XGL_IMAGE_STATE_TRANSITION has one.
    XGL_IMAGE_STATE_TRANSITION state_transition(XGL_IMAGE_STATE old_state, XGL_IMAGE_STATE new_state,
                                                const XGL_IMAGE_SUBRESOURCE_RANGE &range) const
    {
        XGL_IMAGE_STATE_TRANSITION transition = {};
        transition.image = obj();
        transition.oldState = old_state;
        transition.newState = new_state;
        transition.subresourceRange = range;
        return transition;
    }

    // Default create info: 1x1x1, one mip, one slice, one sample.
    static XGL_IMAGE_CREATE_INFO create_info();
    static XGL_IMAGE_SUBRESOURCE subresource(XGL_IMAGE_ASPECT aspect, uint32_t mip_level, uint32_t array_slice);
    static XGL_IMAGE_SUBRESOURCE subresource(const XGL_IMAGE_SUBRESOURCE_RANGE &range, uint32_t mip_level, uint32_t array_slice);
    static XGL_IMAGE_SUBRESOURCE_RANGE subresource_range(XGL_IMAGE_ASPECT aspect, uint32_t base_mip_level, uint32_t mip_levels,
                                                         uint32_t base_array_slice, uint32_t array_size);
    static XGL_IMAGE_SUBRESOURCE_RANGE subresource_range(const XGL_IMAGE_CREATE_INFO &info, XGL_IMAGE_ASPECT aspect);
    static XGL_IMAGE_SUBRESOURCE_RANGE subresource_range(const XGL_IMAGE_SUBRESOURCE &subres);

    static XGL_EXTENT2D extent(int32_t width, int32_t height);
    static XGL_EXTENT2D extent(const XGL_EXTENT2D &extent, uint32_t mip_level);
    static XGL_EXTENT2D extent(const XGL_EXTENT3D &extent);

    static XGL_EXTENT3D extent(int32_t width, int32_t height, int32_t depth);
    static XGL_EXTENT3D extent(const XGL_EXTENT3D &extent, uint32_t mip_level);

private:
    void init_info(const Device &dev, const XGL_IMAGE_CREATE_INFO &info);

    XGL_IMAGE_CREATE_INFO create_info_;
    XGL_FLAGS format_features_;  // feature flags for create_info_.format/tiling
};
| 409 | |
// Wrapper for an XGL_IMAGE_VIEW (shader-accessible view of an Image).
class ImageView : public DerivedObject<XGL_IMAGE_VIEW, Object> {
public:
    // xglCreateImageView()
    void init(const Device &dev, const XGL_IMAGE_VIEW_CREATE_INFO &info);
};
| 415 | |
// Wrapper for an XGL_COLOR_ATTACHMENT_VIEW (render-target view).
class ColorAttachmentView : public DerivedObject<XGL_COLOR_ATTACHMENT_VIEW, Object> {
public:
    // xglCreateColorAttachmentView()
    void init(const Device &dev, const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO &info);
};
| 421 | |
// Wrapper for an XGL_DEPTH_STENCIL_VIEW (depth/stencil attachment view).
class DepthStencilView : public DerivedObject<XGL_DEPTH_STENCIL_VIEW, Object> {
public:
    // xglCreateDepthStencilView()
    void init(const Device &dev, const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO &info);
};
| 427 | |
// Wrapper for an XGL_SHADER.  init_try() reports failure via XGL_RESULT
// instead of the error callback used by init().
class Shader : public DerivedObject<XGL_SHADER, Object> {
public:
    // xglCreateShader()
    void init(const Device &dev, const XGL_SHADER_CREATE_INFO &info);
    XGL_RESULT init_try(const Device &dev, const XGL_SHADER_CREATE_INFO &info);

    static XGL_SHADER_CREATE_INFO create_info(size_t code_size, const void *code, XGL_FLAGS flags);
};
| 436 | |
// Wrapper for an XGL_PIPELINE (graphics or compute, created or loaded).
class Pipeline : public DerivedObject<XGL_PIPELINE, Object> {
public:
    // xglCreateGraphicsPipeline()
    void init(const Device &dev, const XGL_GRAPHICS_PIPELINE_CREATE_INFO &info);
    // xglCreateComputePipeline()
    void init(const Device &dev, const XGL_COMPUTE_PIPELINE_CREATE_INFO &info);
    // xglLoadPipeline()
    void init(const Device&dev, size_t size, const void *data);

    // xglStorePipeline()
    // Serializes the pipeline into data; returns the stored size.
    size_t store(size_t size, void *data);
};
| 449 | |
// Wrapper for an XGL_PIPELINE_DELTA (difference between two pipelines).
class PipelineDelta : public DerivedObject<XGL_PIPELINE_DELTA, Object> {
public:
    // xglCreatePipelineDelta()
    void init(const Device &dev, const Pipeline &p1, const Pipeline &p2);
};
| 455 | |
// Wrapper for an XGL_SAMPLER.
class Sampler : public DerivedObject<XGL_SAMPLER, Object> {
public:
    // xglCreateSampler()
    void init(const Device &dev, const XGL_SAMPLER_CREATE_INFO &info);
};
| 461 | |
// Wrapper for an XGL_DESCRIPTOR_SET.  Updates are bracketed by
// begin()/end(); the attach() overloads bind descriptors of each kind
// starting at a given slot.
class DescriptorSet : public DerivedObject<XGL_DESCRIPTOR_SET, Object> {
public:
    // xglCreateDescriptorSet()
    void init(const Device &dev, const XGL_DESCRIPTOR_SET_CREATE_INFO &info);

    // xglBeginDescriptorSetUpdate()
    // xglEndDescriptorSetUpdate()
    void begin() { xglBeginDescriptorSetUpdate(obj()); }
    void end() { xglEndDescriptorSetUpdate(obj()); }

    // xglAttachSamplerDescriptors()
    void attach(uint32_t start_slot, const std::vector<const Sampler *> &samplers);
    void attach(uint32_t start_slot, const Sampler &sampler)
    {
        attach(start_slot, std::vector<const Sampler *>(1, &sampler));
    }

    // xglAttachImageViewDescriptors()
    void attach(uint32_t start_slot, const std::vector<XGL_IMAGE_VIEW_ATTACH_INFO> &img_views);
    void attach(uint32_t start_slot, const XGL_IMAGE_VIEW_ATTACH_INFO &view)
    {
        attach(start_slot, std::vector<XGL_IMAGE_VIEW_ATTACH_INFO>(1, view));
    }

    // xglAttachMemoryViewDescriptors()
    void attach(uint32_t start_slot, const std::vector<XGL_MEMORY_VIEW_ATTACH_INFO> &mem_views);
    void attach(uint32_t start_slot, const XGL_MEMORY_VIEW_ATTACH_INFO &view)
    {
        attach(start_slot, std::vector<XGL_MEMORY_VIEW_ATTACH_INFO>(1, view));
    }

    // xglAttachNestedDescriptors()
    void attach(uint32_t start_slot, const std::vector<XGL_DESCRIPTOR_SET_ATTACH_INFO> &sets);
    void attach(uint32_t start_slot, const XGL_DESCRIPTOR_SET_ATTACH_INFO &set)
    {
        attach(start_slot, std::vector<XGL_DESCRIPTOR_SET_ATTACH_INFO>(1, set));
    }

    // xglClearDescriptorSetSlots()
    void clear(uint32_t start_slot, uint32_t count) { xglClearDescriptorSetSlots(obj(), start_slot, count); }
    // Clears all slots.  NOTE(review): relies on info_ being stored by
    // init() — confirm in the .cpp; nothing in this header assigns it.
    void clear() { clear(0, info_.slots); }

    static XGL_DESCRIPTOR_SET_CREATE_INFO create_info(uint32_t slot_count)
    {
        XGL_DESCRIPTOR_SET_CREATE_INFO info = {};
        info.sType = XGL_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO;
        info.slots = slot_count;
        return info;
    }

private:
    XGL_DESCRIPTOR_SET_CREATE_INFO info_;
};
| 515 | |
// Dynamic viewport state (xglCreateViewportState()).
class DynamicVpStateObject : public DerivedObject<XGL_VIEWPORT_STATE_OBJECT, DynamicStateObject> {
public:
    // xglCreateViewportState()
    void init(const Device &dev, const XGL_VIEWPORT_STATE_CREATE_INFO &info);
};
| 521 | |
// Dynamic rasterizer state (xglCreateRasterState()).
class DynamicRsStateObject : public DerivedObject<XGL_RASTER_STATE_OBJECT, DynamicStateObject> {
public:
    // xglCreateRasterState()
    void init(const Device &dev, const XGL_RASTER_STATE_CREATE_INFO &info);
};
| 527 | |
// Dynamic MSAA state (xglCreateMsaaState()).
class DynamicMsaaStateObject : public DerivedObject<XGL_MSAA_STATE_OBJECT, DynamicStateObject> {
public:
    // xglCreateMsaaState()
    void init(const Device &dev, const XGL_MSAA_STATE_CREATE_INFO &info);
};
| 533 | |
// Dynamic color-blend state (xglCreateColorBlendState()).
class DynamicCbStateObject : public DerivedObject<XGL_COLOR_BLEND_STATE_OBJECT, DynamicStateObject> {
public:
    // xglCreateColorBlendState()
    void init(const Device &dev, const XGL_COLOR_BLEND_STATE_CREATE_INFO &info);
};
| 539 | |
// Dynamic depth-stencil state (xglCreateDepthStencilState()).
class DynamicDsStateObject : public DerivedObject<XGL_DEPTH_STENCIL_STATE_OBJECT, DynamicStateObject> {
public:
    // xglCreateDepthStencilState()
    void init(const Device &dev, const XGL_DEPTH_STENCIL_STATE_CREATE_INFO &info);
};
| 545 | |
// Wrapper for an XGL_CMD_BUFFER with begin/end/reset recording control.
class CmdBuffer : public DerivedObject<XGL_CMD_BUFFER, Object> {
public:
    explicit CmdBuffer() {}
    explicit CmdBuffer(const Device &dev, const XGL_CMD_BUFFER_CREATE_INFO &info) { init(dev, info); }

    // xglCreateCommandBuffer()
    void init(const Device &dev, const XGL_CMD_BUFFER_CREATE_INFO &info);

    // xglBeginCommandBuffer()
    void begin(XGL_FLAGS flags);
    void begin();

    // xglEndCommandBuffer()
    // xglResetCommandBuffer()
    void end();
    void reset();

    static XGL_CMD_BUFFER_CREATE_INFO create_info(XGL_QUEUE_TYPE type);
};
| 565 | |
| 566 | inline const void *Object::map(XGL_FLAGS flags) const |
| 567 | { |
| 568 | return (primary_mem_) ? primary_mem_->map(flags) : NULL; |
| 569 | } |
| 570 | |
| 571 | inline void *Object::map(XGL_FLAGS flags) |
| 572 | { |
| 573 | return (primary_mem_) ? primary_mem_->map(flags) : NULL; |
| 574 | } |
| 575 | |
| 576 | inline void Object::unmap() const |
| 577 | { |
| 578 | if (primary_mem_) |
| 579 | primary_mem_->unmap(); |
| 580 | } |
| 581 | |
| 582 | inline XGL_MEMORY_ALLOC_INFO GpuMemory::alloc_info(const XGL_MEMORY_REQUIREMENTS &reqs) |
| 583 | { |
| 584 | XGL_MEMORY_ALLOC_INFO info = {}; |
| 585 | info.sType = XGL_STRUCTURE_TYPE_MEMORY_ALLOC_INFO; |
| 586 | info.allocationSize = reqs.size; |
| 587 | info.alignment = reqs.alignment; |
| 588 | info.heapCount = reqs.heapCount; |
| 589 | for (int i = 0; i < reqs.heapCount; i++) |
| 590 | info.heaps[i] = reqs.heaps[i]; |
| 591 | info.memPriority = XGL_MEMORY_PRIORITY_NORMAL; |
| 592 | return info; |
| 593 | } |
| 594 | |
| 595 | inline XGL_FENCE_CREATE_INFO Fence::create_info(XGL_FLAGS flags) |
| 596 | { |
| 597 | XGL_FENCE_CREATE_INFO info = {}; |
| 598 | info.sType = XGL_STRUCTURE_TYPE_FENCE_CREATE_INFO; |
| 599 | info.flags = flags; |
| 600 | return info; |
| 601 | } |
| 602 | |
| 603 | inline XGL_QUEUE_SEMAPHORE_CREATE_INFO QueueSemaphore::create_info(uint32_t init_count, XGL_FLAGS flags) |
| 604 | { |
| 605 | XGL_QUEUE_SEMAPHORE_CREATE_INFO info = {}; |
| 606 | info.sType = XGL_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; |
| 607 | info.initialCount = init_count; |
| 608 | info.flags = flags; |
| 609 | return info; |
| 610 | } |
| 611 | |
| 612 | inline XGL_EVENT_CREATE_INFO Event::create_info(XGL_FLAGS flags) |
| 613 | { |
| 614 | XGL_EVENT_CREATE_INFO info = {}; |
| 615 | info.sType = XGL_STRUCTURE_TYPE_EVENT_CREATE_INFO; |
| 616 | info.flags = flags; |
| 617 | return info; |
| 618 | } |
| 619 | |
| 620 | inline XGL_QUERY_POOL_CREATE_INFO QueryPool::create_info(XGL_QUERY_TYPE type, uint32_t slot_count) |
| 621 | { |
| 622 | XGL_QUERY_POOL_CREATE_INFO info = {}; |
| 623 | info.sType = XGL_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; |
| 624 | info.queryType = type; |
| 625 | info.slots = slot_count; |
| 626 | return info; |
| 627 | } |
| 628 | |
| 629 | inline XGL_IMAGE_CREATE_INFO Image::create_info() |
| 630 | { |
| 631 | XGL_IMAGE_CREATE_INFO info = {}; |
| 632 | info.sType = XGL_STRUCTURE_TYPE_IMAGE_CREATE_INFO; |
| 633 | info.extent.width = 1; |
| 634 | info.extent.height = 1; |
| 635 | info.extent.depth = 1; |
| 636 | info.mipLevels = 1; |
| 637 | info.arraySize = 1; |
| 638 | info.samples = 1; |
| 639 | return info; |
| 640 | } |
| 641 | |
| 642 | inline XGL_IMAGE_SUBRESOURCE Image::subresource(XGL_IMAGE_ASPECT aspect, uint32_t mip_level, uint32_t array_slice) |
| 643 | { |
| 644 | XGL_IMAGE_SUBRESOURCE subres = {}; |
| 645 | subres.aspect = aspect; |
| 646 | subres.mipLevel = mip_level; |
| 647 | subres.arraySlice = array_slice; |
| 648 | return subres; |
| 649 | } |
| 650 | |
| 651 | inline XGL_IMAGE_SUBRESOURCE Image::subresource(const XGL_IMAGE_SUBRESOURCE_RANGE &range, uint32_t mip_level, uint32_t array_slice) |
| 652 | { |
| 653 | return subresource(range.aspect, range.baseMipLevel + mip_level, range.baseArraySlice + array_slice); |
| 654 | } |
| 655 | |
| 656 | inline XGL_IMAGE_SUBRESOURCE_RANGE Image::subresource_range(XGL_IMAGE_ASPECT aspect, uint32_t base_mip_level, uint32_t mip_levels, |
| 657 | uint32_t base_array_slice, uint32_t array_size) |
| 658 | { |
| 659 | XGL_IMAGE_SUBRESOURCE_RANGE range = {}; |
| 660 | range.aspect = aspect; |
| 661 | range.baseMipLevel = base_mip_level; |
| 662 | range.mipLevels = mip_levels; |
| 663 | range.baseArraySlice = base_array_slice; |
| 664 | range.arraySize = array_size; |
| 665 | return range; |
| 666 | } |
| 667 | |
| 668 | inline XGL_IMAGE_SUBRESOURCE_RANGE Image::subresource_range(const XGL_IMAGE_CREATE_INFO &info, XGL_IMAGE_ASPECT aspect) |
| 669 | { |
| 670 | return subresource_range(aspect, 0, info.mipLevels, 0, info.arraySize); |
| 671 | } |
| 672 | |
| 673 | inline XGL_IMAGE_SUBRESOURCE_RANGE Image::subresource_range(const XGL_IMAGE_SUBRESOURCE &subres) |
| 674 | { |
| 675 | return subresource_range(subres.aspect, subres.mipLevel, 1, subres.arraySlice, 1); |
| 676 | } |
| 677 | |
| 678 | inline XGL_EXTENT2D Image::extent(int32_t width, int32_t height) |
| 679 | { |
| 680 | XGL_EXTENT2D extent = {}; |
| 681 | extent.width = width; |
| 682 | extent.height = height; |
| 683 | return extent; |
| 684 | } |
| 685 | |
| 686 | inline XGL_EXTENT2D Image::extent(const XGL_EXTENT2D &extent, uint32_t mip_level) |
| 687 | { |
| 688 | const int32_t width = (extent.width >> mip_level) ? extent.width >> mip_level : 1; |
| 689 | const int32_t height = (extent.height >> mip_level) ? extent.height >> mip_level : 1; |
| 690 | return Image::extent(width, height); |
| 691 | } |
| 692 | |
| 693 | inline XGL_EXTENT2D Image::extent(const XGL_EXTENT3D &extent) |
| 694 | { |
| 695 | return Image::extent(extent.width, extent.height); |
| 696 | } |
| 697 | |
| 698 | inline XGL_EXTENT3D Image::extent(int32_t width, int32_t height, int32_t depth) |
| 699 | { |
| 700 | XGL_EXTENT3D extent = {}; |
| 701 | extent.width = width; |
| 702 | extent.height = height; |
| 703 | extent.depth = depth; |
| 704 | return extent; |
| 705 | } |
| 706 | |
| 707 | inline XGL_EXTENT3D Image::extent(const XGL_EXTENT3D &extent, uint32_t mip_level) |
| 708 | { |
| 709 | const int32_t width = (extent.width >> mip_level) ? extent.width >> mip_level : 1; |
| 710 | const int32_t height = (extent.height >> mip_level) ? extent.height >> mip_level : 1; |
| 711 | const int32_t depth = (extent.depth >> mip_level) ? extent.depth >> mip_level : 1; |
| 712 | return Image::extent(width, height, depth); |
| 713 | } |
| 714 | |
| 715 | inline XGL_SHADER_CREATE_INFO Shader::create_info(size_t code_size, const void *code, XGL_FLAGS flags) |
| 716 | { |
| 717 | XGL_SHADER_CREATE_INFO info = {}; |
| 718 | info.sType = XGL_STRUCTURE_TYPE_SHADER_CREATE_INFO; |
| 719 | info.codeSize = code_size; |
| 720 | info.pCode = code; |
| 721 | info.flags = flags; |
| 722 | return info; |
| 723 | } |
| 724 | |
| 725 | inline XGL_CMD_BUFFER_CREATE_INFO CmdBuffer::create_info(XGL_QUEUE_TYPE type) |
| 726 | { |
| 727 | XGL_CMD_BUFFER_CREATE_INFO info = {}; |
| 728 | info.sType = XGL_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO; |
| 729 | info.queueType = type; |
| 730 | return info; |
| 731 | } |
| 732 | |
| 733 | }; // namespace xgl_testing |
| 734 | |
| 735 | #endif // XGLTESTBINDING_H |