#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
        void *page_addr;
        unsigned int gfx_addr;
        struct drm_gem_object *obj;
};

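/*
 * Per-ring register accessors: each ring's TAIL/HEAD/START/CTL registers
 * sit at fixed offsets from that ring's mmio_base, so one set of macros
 * serves every ring.
 */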
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)

struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
        const char *name;
        enum intel_ring_id {
                RING_RENDER = 0x1,
                RING_BSD = 0x2,
        } id;
        u32 mmio_base;
        unsigned long size;
        void *virtual_start;
        struct drm_device *dev;
        struct drm_gem_object *gem_object;

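        /*
         * Software copies of the ring pointers: head and tail are byte
         * offsets into the ring, space is the number of bytes free
         * between them.
         */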
        unsigned int head;
        unsigned int tail;
        int space;
        struct intel_hw_status_page status_page;

        u32 irq_gem_seqno;              /* last seqno seen at irq time */
        u32 waiting_gem_seqno;
        int user_irq_refcount;
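        /* Reference-counted enable/disable of the ring's user interrupt. */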
        void (*user_irq_get)(struct drm_device *dev,
                             struct intel_ring_buffer *ring);
        void (*user_irq_put)(struct drm_device *dev,
                             struct intel_ring_buffer *ring);

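        /*
         * Per-ring operations supplied by each ring's implementation
         * (render vs. BSD): hardware init, writing the tail register,
         * emitting cache flushes, emitting a request breadcrumb and
         * returning its seqno, reading the last completed seqno, and
         * dispatching a batchbuffer for execbuffer.
         */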
        int (*init)(struct drm_device *dev,
                    struct intel_ring_buffer *ring);

        void (*set_tail)(struct drm_device *dev,
                         struct intel_ring_buffer *ring,
                         u32 value);
        void (*flush)(struct drm_device *dev,
                      struct intel_ring_buffer *ring,
                      u32 invalidate_domains,
                      u32 flush_domains);
        u32 (*add_request)(struct drm_device *dev,
                           struct intel_ring_buffer *ring,
                           u32 flush_domains);
        u32 (*get_seqno)(struct drm_device *dev,
                         struct intel_ring_buffer *ring);
        int (*dispatch_gem_execbuffer)(struct drm_device *dev,
                                       struct intel_ring_buffer *ring,
                                       struct drm_i915_gem_execbuffer2 *exec,
                                       struct drm_clip_rect *cliprects,
                                       uint64_t exec_offset);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_rendering_seqno
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Do we have some not yet emitted requests outstanding?
         */
        bool outstanding_lazy_request;

        wait_queue_head_t irq_queue;
        drm_local_map_t map;
};

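/*
 * Read one dword from the CPU mapping of the ring's hardware status page
 * (e.g. the breadcrumb seqno written by add_request()).
 */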
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
                       int reg)
{
        u32 *regs = ring->status_page.page_addr;
        return regs[reg];
}

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
                               struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct drm_device *dev,
                      struct intel_ring_buffer *ring, int n);

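/*
 * Write one dword at the current software tail and advance the tail by
 * four bytes. Callers reserve space with intel_ring_begin() beforehand
 * and commit the new tail to the hardware with intel_ring_advance().
 */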
static inline void intel_ring_emit(struct drm_device *dev,
                                   struct intel_ring_buffer *ring,
                                   unsigned int data)
{
        unsigned int *virt = ring->virtual_start + ring->tail;
        *virt = data;
        ring->tail += 4;
}

void intel_ring_advance(struct drm_device *dev,
                        struct intel_ring_buffer *ring);

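/*
 * A minimal sketch of the usual emit sequence (assuming, as callers in
 * this driver do, that the count passed to intel_ring_begin() is in
 * dwords and that an MI_NOOP pads the write to an even dword count):
 *
 *      intel_ring_begin(dev, ring, 2);
 *      intel_ring_emit(dev, ring, MI_FLUSH);
 *      intel_ring_emit(dev, ring, MI_NOOP);
 *      intel_ring_advance(dev, ring);
 */
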
u32 intel_ring_get_seqno(struct drm_device *dev,
                         struct intel_ring_buffer *ring);

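/* Per-device setup of the render and BSD (video decode) rings. */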
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);

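/*
 * Read the ring's active head (ACTHD) register, e.g. for hangcheck and
 * error capture, and (re)program the ring's hardware status page address.
 */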
u32 intel_ring_get_active_head(struct drm_device *dev,
                               struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct drm_device *dev,
                                  struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */