/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

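/* GGTT alignment, in bytes, used when pinning a logical ring (execlists) context. */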
#define GEN8_LR_CONTEXT_ALIGN 4096

/* Execlists regs */
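/*
 * ELSP accepts a pair of 64-bit context descriptors; the driver submits them
 * as four consecutive 32-bit writes to RING_ELSP (element 1 high/low, then
 * element 0 high/low), and the hardware kicks off the submission on the last
 * write.  See the ELSP write helper in intel_lrc.c.
 */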
#define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
#define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
#define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
#define	  CTX_CTRL_RS_CTX_ENABLE		(1 << 1)
#define RING_CONTEXT_STATUS_BUF_BASE(ring)	_MMIO((ring)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)

/* The docs specify that the write pointer wraps around after 5h, "After status
 * is written out to the last available status QW at offset 5h, this pointer
 * wraps to 0."
 *
 * Therefore, one must infer that even though there are 3 bits available,
 * pointer values 6 and 7 appear to be reserved.
 */
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status) \
	(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status) \
	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
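
/*
 * A minimal sketch of how the masks above are typically consumed (the ring
 * pointer and the I915_READ() accessor come from the rest of the driver, not
 * from this header; local variable names are illustrative only):
 *
 *	u32 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 *	u32 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 *	u32 read_pointer = GEN8_CSB_READ_PTR(status_pointer);
 *
 *	if (read_pointer > write_pointer)
 *		write_pointer += GEN8_CSB_ENTRIES;
 */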

/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);

/**
 * intel_logical_ring_advance() - advance the ringbuffer tail
 * @ringbuf: Ringbuffer to advance.
 *
 * The tail is only updated in our logical ringbuffer struct.
 */
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
	ringbuf->tail &= ringbuf->size - 1;
}

/**
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
 * @ringbuf: Ringbuffer to write to.
 * @data: DWORD to write.
 */
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
					   u32 data)
{
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}

static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
					       i915_reg_t reg)
{
	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
}
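
/*
 * Typical usage of the emit helpers (an illustrative sketch, not a quote from
 * the driver; "reg" and "value" are placeholder names): space is reserved
 * with intel_logical_ring_begin(), DWORDs are written with the emit helpers,
 * and the tail is then wrapped with intel_logical_ring_advance():
 *
 *	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
 *	intel_logical_ring_emit_reg(ringbuf, reg);
 *	intel_logical_ring_emit(ringbuf, value);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(ringbuf);
 */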

/* Logical Ring Contexts */

/* One extra page is added before LRC for GuC as shared data */
#define LRC_GUCSHR_PN	(0)
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
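
/*
 * The *_PN values are page numbers within the context object: page 0 holds
 * the GuC shared data, page 1 is the per-process hardware status page
 * (PPHWSP), and the logical ring context register state starts at
 * LRC_STATE_PN.
 */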

void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
				    struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
			    struct intel_engine_cs *engine);

struct drm_i915_private;

void intel_lr_context_reset(struct drm_i915_private *dev_priv,
			    struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
				     struct intel_engine_cs *engine);

u32 intel_execlists_ctx_id(struct intel_context *ctx,
			   struct intel_engine_cs *engine);

/* Execlists */
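/*
 * intel_sanitize_enable_execlists() reconciles the i915.enable_execlists
 * module parameter with what the platform actually supports; the driver then
 * uses the sanitized value when deciding whether to take the execlists paths.
 */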
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas);

void intel_execlists_retire_requests(struct intel_engine_cs *engine);

#endif /* _INTEL_LRC_H_ */