#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	void		*page_addr;
	unsigned int	gfx_addr;
	struct drm_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
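/*
 * Illustrative sketch only, not part of this header: the TAIL macros above
 * are what a ring's set_tail() callback would typically use to tell the
 * hardware how far software has filled the ring.  The function name and the
 * dev_priv lookup are assumptions made for the example (I915_WRITE expects a
 * dev_priv in scope).
 *
 *	static void example_ring_set_tail(struct drm_device *dev,
 *					  struct intel_ring_buffer *ring,
 *					  u32 value)
 *	{
 *		drm_i915_private_t *dev_priv = dev->dev_private;
 *		I915_WRITE_TAIL(ring, value);
 *	}
 */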

struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
	} id;
	struct ring_regs {
		u32 ctl;
		u32 head;
	} regs;
	u32		mmio_base;
	unsigned long	size;
	unsigned int	alignment;
	void		*virtual_start;
	struct drm_device *dev;
	struct drm_gem_object *gem_object;

	unsigned int	head;
	unsigned int	tail;
	unsigned int	space;
	struct intel_hw_status_page status_page;

	u32		irq_gem_seqno;		/* last seq seen at irq time */
	u32		waiting_gem_seqno;
	int		user_irq_refcount;
	void		(*user_irq_get)(struct drm_device *dev,
					struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct drm_device *dev,
					struct intel_ring_buffer *ring);
	void		(*setup_status_page)(struct drm_device *dev,
					     struct intel_ring_buffer *ring);

	int		(*init)(struct drm_device *dev,
				struct intel_ring_buffer *ring);

	unsigned int	(*get_head)(struct drm_device *dev,
				    struct intel_ring_buffer *ring);
	void		(*set_tail)(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    u32 value);
	unsigned int	(*get_active_head)(struct drm_device *dev,
					   struct intel_ring_buffer *ring);
	void		(*flush)(struct drm_device *dev,
				 struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
	u32		(*add_request)(struct drm_device *dev,
				       struct intel_ring_buffer *ring,
				       struct drm_file *file_priv,
				       u32 flush_domains);
	u32		(*get_gem_seqno)(struct drm_device *dev,
					 struct intel_ring_buffer *ring);
	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
						   struct intel_ring_buffer *ring,
						   struct drm_i915_gem_execbuffer2 *exec,
						   struct drm_clip_rect *cliprects,
						   uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Are there requests outstanding that have not yet been
	 * emitted to the ring?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;
};
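/*
 * Illustrative sketch only: each ring is normally described by a statically
 * initialized intel_ring_buffer whose callbacks the common ring code invokes.
 * The callback names below are placeholders, and the register base and size
 * values are assumptions for the example, not the driver's actual
 * definitions.
 *
 *	static struct intel_ring_buffer example_render_ring = {
 *		.name			= "render ring",
 *		.id			= RING_RENDER,
 *		.mmio_base		= RENDER_RING_BASE,
 *		.size			= 32 * PAGE_SIZE,
 *		.alignment		= PAGE_SIZE,
 *		.init			= example_init,
 *		.set_tail		= example_set_tail,
 *		.flush			= example_flush,
 *		.add_request		= example_add_request,
 *		.get_gem_seqno		= example_get_gem_seqno,
 *		.dispatch_gem_execbuffer = example_dispatch_execbuffer,
 *	};
 */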

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	u32 *regs = ring->status_page.page_addr;
	return regs[reg];
}
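/*
 * Illustrative sketch only: a ring's get_gem_seqno() callback can pull the
 * last sequence number written back by the GPU out of the status page.  The
 * function name and the slot index (I915_GEM_HWS_INDEX, from i915_drv.h) are
 * assumptions made for the example.
 *
 *	static u32 example_get_gem_seqno(struct drm_device *dev,
 *					 struct intel_ring_buffer *ring)
 *	{
 *		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *	}
 */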

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n);
int intel_wrap_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   unsigned int data)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	*virt = data;
	ring->tail += 4;
}
128
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800129void intel_fill_struct(struct drm_device *dev,
130 struct intel_ring_buffer *ring,
131 void *data,
132 unsigned int len);
133void intel_ring_advance(struct drm_device *dev,
134 struct intel_ring_buffer *ring);
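/*
 * Illustrative sketch only: command emission follows the begin/emit/advance
 * pattern declared above.  intel_ring_begin() makes sure enough dwords of
 * space are available (waiting or wrapping if necessary), intel_ring_emit()
 * writes one dword at the current tail, and intel_ring_advance() exposes the
 * new tail to the hardware.  MI_FLUSH and MI_NOOP come from i915_reg.h; the
 * dword count here is just an example.
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_FLUSH);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 */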

u32 intel_ring_get_seqno(struct drm_device *dev,
			 struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);

#endif /* _INTEL_RINGBUFFER_H_ */