#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
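
/*
 * An illustrative sketch, not part of the driver interface: free space in
 * a ring of 'size' bytes is derived from the head and tail offsets while
 * holding I915_RING_FREE_SPACE bytes in reserve, so the head can never
 * catch up to the tail on the same cacheline. The helper name and
 * signature here are hypothetical; the real computation lives in
 * intel_ringbuffer.c.
 */
static inline int example_ring_space(u32 head, u32 tail, u32 size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;
	return space;
}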

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};
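
/*
 * A sketch of how these fields are assumed to be consumed (the
 * authoritative logic lives in the periodic hangcheck work, not in this
 * header): each tick compares the current seqno and ACTHD against the
 * values sampled last time; a ring showing no forward progress
 * accumulates score until it reaches HANGCHECK_SCORE_RING_HUNG and is
 * treated as hung. The names and the penalty function below are
 * illustrative only:
 *
 *	if (hc->seqno == seqno && hc->acthd == acthd)
 *		hc->score += penalty_for(hc->action);
 *	hc->seqno = seqno;
 *	hc->acthd = acthd;
 */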

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/**
	 * We track the position of each request in the ring buffer, and
	 * when a request is retired we advance last_retired_head to its
	 * position: the GPU must have finished processing the request, so
	 * we know the ring can be reclaimed up to that point.
	 *
	 * last_retired_head is set to -1 after the value is consumed, so
	 * we can detect new retirements. (A consumption sketch follows
	 * this struct definition.)
	 */
	u32		last_retired_head;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);

	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32		wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32		signal[I915_NUM_RINGS];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);
		int	(*signal)(struct intel_ring_buffer *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	/*
	 * Tables of commands the command parser needs to know about
	 * for this ring.
	 */
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
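
/*
 * A sketch of the last_retired_head consumption pattern referenced in the
 * struct comment above (assumed; the authoritative code lives in the GEM
 * request-retirement path, and this helper name is hypothetical):
 */
static inline void example_consume_retired_head(struct intel_ring_buffer *ring)
{
	if (ring->last_retired_head != (u32)-1) {
		/* Everything up to this offset has been retired by the GPU,
		 * so the software head may jump forward to reclaim the space.
		 */
		ring->head = ring->last_retired_head;
		/* Mark the value consumed so new retirements are detectable. */
		ring->last_retired_head = (u32)-1;
	}
}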

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
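
/*
 * Example (a sketch): with the ring enum above, the render ring waiting
 * on the video-enhancement ring uses slot 2 of its sync_seqno[] array:
 *
 *	idx = intel_ring_sync_index(&dev_priv->ring[RCS],
 *				    &dev_priv->ring[VECS]);
 *	ring->semaphore.sync_seqno[idx] = seqno;
 *
 * dev_priv->ring[] and seqno are assumed surrounding context, not part of
 * this header.
 */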

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
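
/*
 * Example (a sketch): the seqno most recently completed by the ring is
 * written into the reserved dword at I915_GEM_HWS_INDEX via
 * MI_STORE_DWORD_INDEX, so a get_seqno() implementation typically reduces
 * to:
 *
 *	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */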

void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
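
/*
 * Typical emission pattern (a sketch; error handling trimmed and MI_NOOP
 * standing in for real commands): reserve a fixed number of dwords, write
 * each one, then advance the tail:
 *
 *	if (intel_ring_begin(ring, 2))
 *		return;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */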

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */