#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

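/*
 * Illustrative sketch (hypothetical helper, modeled on the ring_space()
 * helper in intel_ringbuffer.c): how the reserve above factors into
 * free-space accounting. Keeping one cacheline in hand means the tail
 * can never catch the head within the same cacheline, which is the
 * BSpec restriction quoted above.
 */
static inline int __example_ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}
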
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};

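/*
 * Illustrative sketch (hypothetical helper): how a periodic sampler
 * might classify progress from the fields above, loosely modeled on
 * i915_hangcheck_elapsed() (the real code also distinguishes WAIT and
 * KICK from wait/semaphore and subunit state). Callers accumulate
 * ->score over samples and treat the ring as hung once it reaches
 * HANGCHECK_SCORE_RING_HUNG.
 */
static inline enum intel_ring_hangcheck_action
example_hangcheck_classify(const struct intel_ring_hangcheck *hc,
			   u64 acthd, u32 seqno)
{
	if (hc->seqno != seqno)
		return HANGCHECK_ACTIVE;	/* a request retired */
	if (hc->acthd != acthd)
		return HANGCHECK_ACTIVE;	/* head still moving */
	return HANGCHECK_HUNG;			/* no visible progress */
}
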
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements. (A sketch of the consuming side
	 * follows this struct definition.)
	 */
	u32		last_retired_head;

	unsigned	irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);

	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32	wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32	signal[I915_NUM_RINGS];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);
		int	(*signal)(struct intel_ring_buffer *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
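
/*
 * Illustrative sketch (hypothetical helper): how a waiter consumes
 * last_retired_head, following the pattern in intel_ringbuffer.c's
 * wait-for-space path; __example_ring_space() is the sketch near the
 * top of this file.
 */
static inline void example_consume_retired_head(struct intel_ring_buffer *ring)
{
	if (ring->last_retired_head != (u32)-1) {
		/* Everything up to that point has retired, so the GPU
		 * is done with it and the ring can advance. */
		ring->head = ring->last_retired_head;
		ring->last_retired_head = (u32)-1;	/* mark consumed */
		ring->space = __example_ring_space(ring->head, ring->tail,
						   ring->size);
	}
}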

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs  -> 0 = vcs,  1 = bcs,  2 = vecs, 3 = vcs2;
	 * vcs  -> 0 = bcs,  1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs  -> 0 = vecs, 1 = vcs2, 2 = rcs,  3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs,  2 = vcs,  3 = bcs;
	 * vcs2 -> 0 = rcs,  1 = vcs,  2 = bcs,  3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

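/*
 * Illustrative sketch of the bookkeeping around these indices, loosely
 * following i915_gem_object_sync(): the signalling ring "from" records,
 * in the slot named by intel_ring_sync_index(), the newest of its
 * seqnos that "to" has already waited for, so a wait on an older seqno
 * can be skipped.
 */
static inline bool example_already_synced(struct intel_ring_buffer *from,
					  struct intel_ring_buffer *to,
					  u32 seqno)
{
	return seqno <= from->semaphore.sync_seqno[intel_ring_sync_index(from, to)];
}
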
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

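/*
 * Illustrative sketch: a typical get_seqno implementation just reads
 * back the dword that the ring's MI_STORE_DWORD_INDEX breadcrumb wrote
 * at I915_GEM_HWS_INDEX, as the ring_get_seqno() helpers in
 * intel_ringbuffer.c do.
 */
static inline u32 example_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
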
void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);

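/*
 * Illustrative sketch: the canonical emit sequence. Reserve dwords with
 * intel_ring_begin(), write exactly that many with intel_ring_emit(),
 * then intel_ring_advance() wraps the software tail. MI_NOOP (from
 * i915_reg.h) stands in for real commands; __intel_ring_advance() is
 * what additionally writes the new tail to the hardware register.
 */
static inline int example_emit_two_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
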
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */