#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include <assert.h>
#include <stdint.h>
#include "intel_bufmgr.h"

#define BATCH_SZ 4096		/* size of the CPU-side command buffer */
#define BATCH_RESERVED 16	/* tail bytes kept free for the terminating MI_BATCH_BUFFER_END */

struct intel_batchbuffer {
	drm_intel_bufmgr *bufmgr;	/* buffer manager batches are allocated from */
	uint32_t devid;			/* PCI device id, used for generation checks */

	drm_intel_bo *bo;		/* GEM object the commands are flushed into */

	uint8_t buffer[BATCH_SZ];	/* CPU-side staging copy of the commands */
	uint8_t *ptr;			/* current write position within buffer */
	uint8_t *state;
};

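/* Allocate a batchbuffer backed by @bufmgr; @devid is the PCI device id
 * of the GPU the commands target. Release with intel_batchbuffer_free().
 */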
struct intel_batchbuffer *intel_batchbuffer_alloc(drm_intel_bufmgr *bufmgr,
						  uint32_t devid);

void intel_batchbuffer_free(struct intel_batchbuffer *batch);


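/* Submit the accumulated commands to the kernel. The _on_ring variant
 * targets a specific ring (e.g. I915_EXEC_RENDER or I915_EXEC_BLT); the
 * _with_context variant executes on the given hardware context.
 */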
void intel_batchbuffer_flush(struct intel_batchbuffer *batch);
void intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring);
void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
					  drm_intel_context *context);

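/* Discard anything written so far and start over with an empty batch. */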
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);

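/* Append @bytes bytes of raw data to the batch, flushing first if the
 * remaining space is insufficient.
 */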
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
			    const void *data, unsigned int bytes);

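/* Record a relocation against @buffer and emit the presumed address
 * (@buffer's current offset plus @delta) as the next batch dword.
 * @read_domains and @write_domain take I915_GEM_DOMAIN_* flags; a
 * non-zero @fenced requests the fence-aware relocation variant.
 */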
void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
				  drm_intel_bo *buffer,
				  uint32_t delta,
				  uint32_t read_domains,
				  uint32_t write_domain,
				  int fenced);

/* Inline functions - might actually be better off with these
 * non-inlined. Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
#pragma GCC diagnostic ignored "-Winline"
static inline int
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
	return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - batch->buffer);
}
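
/* With the defaults above, a freshly reset batch (ptr == buffer) reports
 * 4096 - 16 = 4080 bytes of usable space.
 */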


static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, uint32_t dword)
{
	assert(intel_batchbuffer_space(batch) >= 4);
	*(uint32_t *) (batch->ptr) = dword;
	batch->ptr += 4;
}

static inline void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
				unsigned int sz)
{
	assert(sz < BATCH_SZ - BATCH_RESERVED);
	if (intel_batchbuffer_space(batch) < (int)sz)	/* cast avoids a signed/unsigned compare */
		intel_batchbuffer_flush(batch);
}
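
/* A typical emission sequence built from the two helpers above (an
 * illustrative sketch; "n", "i" and "data" are hypothetical locals):
 *
 *	intel_batchbuffer_require_space(batch, n * 4);
 *	for (i = 0; i < n; i++)
 *		intel_batchbuffer_emit_dword(batch, data[i]);
 *
 * This is exactly the pattern the BEGIN_BATCH()/OUT_BATCH() macros
 * below wrap up.
 */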

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS	/* expands to nothing; kept for old callers */

#define BEGIN_BATCH(n) do {						\
	intel_batchbuffer_require_space(batch, (n)*4);			\
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(batch, d)

#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {	\
	assert((delta) >= 0);						\
	intel_batchbuffer_emit_reloc(batch, buf, delta,			\
				     read_domains, write_domain, 1);	\
} while (0)

#define OUT_RELOC(buf, read_domains, write_domain, delta) do {		\
	assert((delta) >= 0);						\
	intel_batchbuffer_emit_reloc(batch, buf, delta,			\
				     read_domains, write_domain, 0);	\
} while (0)

/* No-op; retained so emission blocks read BEGIN/.../ADVANCE. */
#define ADVANCE_BATCH() do {						\
} while (0)
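
/* Example use of the macros (an illustrative sketch, not part of this
 * header's API): emit two MI_NOOPs, which encode as zero. The macros
 * assume a variable named "batch" of type struct intel_batchbuffer *
 * is in scope.
 *
 *	BEGIN_BATCH(2);
 *	OUT_BATCH(0);	// MI_NOOP
 *	OUT_BATCH(0);	// MI_NOOP
 *	ADVANCE_BATCH();
 */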
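/* Blitter-based copy helpers: intel_blt_copy() copies a @width x @height
 * rectangle at @bpp bits per pixel (pitches in bytes) from @src_bo to
 * @dst_bo; intel_copy_bo() is a convenience wrapper for the common case.
 */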
void
intel_blt_copy(struct intel_batchbuffer *batch,
	       drm_intel_bo *src_bo, int src_x1, int src_y1, int src_pitch,
	       drm_intel_bo *dst_bo, int dst_x1, int dst_y1, int dst_pitch,
	       int width, int height, int bpp);
void intel_copy_bo(struct intel_batchbuffer *batch,
		   drm_intel_bo *dst_bo, drm_intel_bo *src_bo,
		   int width, int height);

#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = (context) & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
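
/* Example (a sketch; "execbuf" and "ctx_id" are hypothetical locals):
 *
 *	struct drm_i915_gem_execbuffer2 execbuf;
 *
 *	memset(&execbuf, 0, sizeof(execbuf));
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	assert(i915_execbuffer2_get_context_id(execbuf) == ctx_id);
 *
 * The context id lives in the rsvd1 field of the execbuffer2 ioctl
 * argument, as the macros above show.
 */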


#endif /* INTEL_BATCHBUFFER_H */