#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include <assert.h>
#include "intel_bufmgr.h"

#define BATCH_SZ 4096
#define BATCH_RESERVED 16

struct intel_batchbuffer {
	drm_intel_bufmgr *bufmgr;
	uint32_t devid;

	drm_intel_bo *bo;

	uint8_t buffer[BATCH_SZ];
	uint8_t *ptr;
	uint8_t *state;
};

struct intel_batchbuffer *intel_batchbuffer_alloc(drm_intel_bufmgr *bufmgr,
						  uint32_t devid);

void intel_batchbuffer_free(struct intel_batchbuffer *batch);


void intel_batchbuffer_flush(struct intel_batchbuffer *batch);
void intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring);
void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
					  drm_intel_context *context);

void intel_batchbuffer_reset(struct intel_batchbuffer *batch);

void intel_batchbuffer_data(struct intel_batchbuffer *batch,
			    const void *data, unsigned int bytes);

void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
				  drm_intel_bo *buffer,
				  uint32_t delta,
				  uint32_t read_domains,
				  uint32_t write_domain,
				  int fenced);
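
/*
 * A minimal usage sketch of the allocation/flush API above (illustrative
 * only; fd, devid and dwords[] are assumptions about the caller, not
 * declarations from this header):
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, BATCH_SZ);
 *	struct intel_batchbuffer *batch = intel_batchbuffer_alloc(bufmgr, devid);
 *
 *	intel_batchbuffer_data(batch, dwords, sizeof(dwords));
 *	intel_batchbuffer_flush(batch);
 *
 *	intel_batchbuffer_free(batch);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */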

/* Inline functions - might actually be better off with these
 * non-inlined. Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
#pragma GCC diagnostic ignored "-Winline"
static inline int
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
	return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - batch->buffer);
}


static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, uint32_t dword)
{
	assert(intel_batchbuffer_space(batch) >= 4);
	*(uint32_t *) (batch->ptr) = dword;
	batch->ptr += 4;
}

static inline void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
				unsigned int sz)
{
	assert(sz < BATCH_SZ - BATCH_RESERVED);
	if (intel_batchbuffer_space(batch) < sz)
		intel_batchbuffer_flush(batch);
}
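
/*
 * Illustrative sketch of emitting raw dwords with the helpers above; the
 * value 0 stands in for MI_NOOP and is only a placeholder command:
 *
 *	intel_batchbuffer_require_space(batch, 2 * 4);
 *	intel_batchbuffer_emit_dword(batch, 0);
 *	intel_batchbuffer_emit_dword(batch, 0);
 *
 * require_space() flushes the batch first if fewer than 8 usable bytes
 * remain, so the emit_dword() calls cannot overrun the BATCH_RESERVED tail.
 */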

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) do {						\
	intel_batchbuffer_require_space(batch, (n)*4);			\
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(batch, d)

#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {	\
	assert((delta) >= 0);						\
	intel_batchbuffer_emit_reloc(batch, buf, delta,			\
				     read_domains, write_domain, 1);	\
} while (0)

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
	assert((delta) >= 0);						\
	intel_batchbuffer_emit_reloc(batch, buf, delta,			\
				     read_domains, write_domain, 0);	\
} while (0)

#define ADVANCE_BATCH() do {						\
} while (0)
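
/*
 * Typical use of the macros above, sketched with a placeholder command
 * dword (cmd) and destination object (dst_bo); both are assumptions about
 * the caller.  A local named "batch" must be in scope, since the macros
 * reference it implicitly:
 *
 *	BEGIN_BATCH(3);
 *	OUT_BATCH(cmd);
 *	OUT_RELOC(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 *	OUT_BATCH(0);
 *	ADVANCE_BATCH();
 */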

void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);

void intel_copy_bo(struct intel_batchbuffer *batch,
		   drm_intel_bo *dst_bo, drm_intel_bo *src_bo,
		   int width, int height);

#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
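
/*
 * Sketch of tagging an execbuffer with a hardware context via the macros
 * above; execbuf and ctx_id are assumptions about the surrounding code:
 *
 *	struct drm_i915_gem_execbuffer2 execbuf;
 *	memset(&execbuf, 0, sizeof(execbuf));
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	assert(i915_execbuffer2_get_context_id(execbuf) == ctx_id);
 */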


#endif