/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
| 23 | |
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

/* Required GGTT alignment for a GEN8 logical ring context object. */
#define GEN8_LR_CONTEXT_ALIGN 4096

/*
 * Execlists regs: per-engine MMIO register offsets, expressed relative to
 * the engine's mmio_base so the same macros work for every ring.
 */
#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
| 35 | |
/* Logical Rings */
/* Stop a logical ring; counterpart to the init/start path. */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
/* Tear down a logical ring's resources. */
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
/* Initialize all logical rings for the device; returns 0 or -errno. */
int intel_logical_rings_init(struct drm_device *dev);

/* Emit a flush of all caches on this ringbuffer; returns 0 or -errno. */
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
/* Advance the ringbuffer tail and submit the work to the hardware. */
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
Oscar Mateo | 73e4d07 | 2014-07-24 17:04:48 +0100 | [diff] [blame] | 43 | /** |
| 44 | * intel_logical_ring_advance() - advance the ringbuffer tail |
| 45 | * @ringbuf: Ringbuffer to advance. |
| 46 | * |
| 47 | * The tail is only updated in our logical ringbuffer struct. |
| 48 | */ |
Oscar Mateo | 82e104c | 2014-07-24 17:04:26 +0100 | [diff] [blame] | 49 | static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf) |
| 50 | { |
| 51 | ringbuf->tail &= ringbuf->size - 1; |
| 52 | } |
Oscar Mateo | 73e4d07 | 2014-07-24 17:04:48 +0100 | [diff] [blame] | 53 | /** |
| 54 | * intel_logical_ring_emit() - write a DWORD to the ringbuffer. |
| 55 | * @ringbuf: Ringbuffer to write to. |
| 56 | * @data: DWORD to write. |
| 57 | */ |
Oscar Mateo | 82e104c | 2014-07-24 17:04:26 +0100 | [diff] [blame] | 58 | static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, |
| 59 | u32 data) |
| 60 | { |
| 61 | iowrite32(data, ringbuf->virtual_start + ringbuf->tail); |
| 62 | ringbuf->tail += 4; |
| 63 | } |
/* Reserve space for num_dwords DWORDs in the ringbuffer; returns 0 or -errno. */
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);

/* Logical Ring Contexts */
/* Emit the golden render state into the given context; returns 0 or -errno. */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
				       struct intel_context *ctx);
/* Free all per-engine backing objects of a logical ring context. */
void intel_lr_context_free(struct intel_context *ctx);
/* Create the context's backing objects for this engine if not done yet. */
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);
/* Drop the pin obtained when the context was submitted to this engine. */
void intel_lr_context_unpin(struct intel_engine_cs *ring,
			    struct intel_context *ctx);

/* Execlists */
/* Sanitize the enable_execlists module parameter against hw/sw support. */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
/* Execbuffer submission path when execlists are enabled; returns 0 or -errno. */
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags);
/* Return the hardware context ID associated with a context backing object. */
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
Oscar Mateo | 127f100 | 2014-07-24 17:04:11 +0100 | [diff] [blame] | 85 | |
/**
 * struct intel_ctx_submit_request - queued context submission request
 * @ctx: Context to submit to the ELSP.
 * @ring: Engine to submit it to.
 * @tail: how far in the context's ringbuffer this request goes to.
 * @execlist_link: link in the submission queue.
 * @elsp_submitted: no. of times this request has been sent to the ELSP.
 *
 * The ELSP only accepts two elements at a time, so we queue context/tail
 * pairs on a given queue (ring->execlist_queue) until the hardware is
 * available. The queue serves a double purpose: we also use it to keep track
 * of the up to 2 contexts currently in the hardware (usually one in execution
 * and the other queued up by the GPU): We only remove elements from the head
 * of the queue when the hardware informs us that an element has been
 * completed.
 *
 * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
 */
struct intel_ctx_submit_request {
	struct intel_context *ctx;
	struct intel_engine_cs *ring;
	u32 tail;

	struct list_head execlist_link;

	int elsp_submitted;
};
| 114 | |
/* Bottom-half handler for context-switch interrupts on this engine. */
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
/* Retire completed requests from the engine's execlist retirement list. */
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
Thomas Daniel | e981e7b | 2014-07-24 17:04:39 +0100 | [diff] [blame] | 117 | |
Oscar Mateo | b20385f | 2014-07-24 17:04:10 +0100 | [diff] [blame] | 118 | #endif /* _INTEL_LRC_H_ */ |