/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Originally a fake version of the buffer manager so that we could
 * prototype the changes in a driver fairly quickly; it has since been
 * fleshed out into a fully functional interim solution.
 *
 * Basically wraps the old-style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <xf86drm.h>
#include <pthread.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
#include "libdrm_macros.h"
#include "libdrm_lists.h"

#define DBG(...) do {				\
	if (bufmgr_fake->bufmgr.debug)		\
		drmMsg(__VA_ARGS__);		\
} while (0)

/* Internal flags:
 */
#define BM_NO_BACKING_STORE 0x00000001
#define BM_NO_FENCE_SUBDATA 0x00000002
#define BM_PINNED 0x00000004

/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed. This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
#define MAX_RELOCS 4096

struct fake_buffer_reloc {
	/** Buffer object that the relocation points at. */
	drm_intel_bo *target_buf;
	/** Offset of the relocation entry within reloc_buf. */
	uint32_t offset;
	/**
	 * Cached value of the offset when we last performed this relocation.
	 */
	uint32_t last_target_offset;
	/** Value added to target_buf's offset to get the relocation entry. */
	uint32_t delta;
	/** Cache domains the target buffer is read into. */
	uint32_t read_domains;
	/** Cache domain the target buffer will have dirty cachelines in. */
	uint32_t write_domain;
};

struct block {
	struct block *next, *prev;
	struct mem_block *mem;	/* BM_MEM_AGP */

	/**
	 * Marks that the block is currently in the aperture and has yet to be
	 * fenced.
	 */
	unsigned on_hardware:1;
	/**
	 * Marks that the block is currently fenced (being used by rendering)
	 * and can't be freed until @fence is passed.
	 */
	unsigned fenced:1;

	/** Fence cookie for the block. */
	unsigned fence;		/* Split to read_fence, write_fence */

	drm_intel_bo *bo;
	void *virtual;
};

typedef struct _bufmgr_fake {
	drm_intel_bufmgr bufmgr;

	pthread_mutex_t lock;

	unsigned long low_offset;
	unsigned long size;
	void *virtual;

	struct mem_block *heap;

	unsigned buf_nr;	/* for generating ids */

	/**
	 * List of blocks which are currently in the GART but haven't been
	 * fenced yet.
	 */
	struct block on_hardware;
	/**
	 * List of blocks which are in the GART and have an active fence on
	 * them.
	 */
	struct block fenced;
	/**
	 * List of blocks which have an expired fence and are ready to be
	 * evicted.
	 */
	struct block lru;

	unsigned int last_fence;

	unsigned fail:1;
	unsigned need_fence:1;
	int thrashing;

	/**
	 * Driver callback to emit a fence, returning the cookie.
	 *
	 * This allows the driver to hook in a replacement for the DRM usage in
	 * bufmgr_fake.
	 *
	 * Currently, this also requires that a write flush be emitted before
	 * emitting the fence, but this should change.
	 */
	unsigned int (*fence_emit) (void *private);
	/** Driver callback to wait for a fence cookie to have passed. */
	void (*fence_wait) (unsigned int fence, void *private);
	void *fence_priv;

	/**
	 * Driver callback to execute a buffer.
	 *
	 * This allows the driver to hook in a replacement for the DRM usage in
	 * bufmgr_fake.
	 */
	int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
	void *exec_priv;

	/** Driver-supplied argument to driver callbacks */
	void *driver_priv;
	/**
	 * Pointer to kernel-updated sarea data for the last completed user irq
	 */
	volatile int *last_dispatch;

	int fd;

	int debug;

	int performed_rendering;
} drm_intel_bufmgr_fake;

typedef struct _drm_intel_bo_fake {
	drm_intel_bo bo;

	unsigned id;		/* debug only */
	const char *name;

	unsigned dirty:1;
	/**
	 * Has the card written to this buffer - we may need to copy it back.
	 */
	unsigned card_dirty:1;
	unsigned int refcount;
	/* Flags may consist of any of the DRM_BO flags, plus
	 * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
	 * first two driver private flags.
	 */
	uint64_t flags;
	/** Cache domains the target buffer is read into. */
	uint32_t read_domains;
	/** Cache domain the target buffer will have dirty cachelines in. */
	uint32_t write_domain;

	unsigned int alignment;
	int is_static, validated;
	unsigned int map_count;

	/** relocation list */
	struct fake_buffer_reloc *relocs;
	int nr_relocs;
	/**
	 * Total size of the target_bos of this buffer.
	 *
	 * Used for estimation in check_aperture.
	 */
	unsigned int child_size;

	struct block *block;
	void *backing_store;
	void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
	void *invalidate_ptr;
} drm_intel_bo_fake;

static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
			unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff

static int
FENCE_LTE(unsigned a, unsigned b)
{
	if (a == b)
		return 1;

	if (a < b && b - a < (1 << 24))
		return 1;

	if (a > b && MAXFENCE - a + b < (1 << 24))
		return 1;

	return 0;
}
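
/*
 * A worked example of the wrapping comparison above (illustrative values,
 * not taken from real hardware): sequence numbers live in [0, MAXFENCE]
 * and are only ever compared within a window of 2^24, so FENCE_LTE stays
 * correct across wrap-around.
 *
 *   FENCE_LTE(100, 100)          == 1  (equal)
 *   FENCE_LTE(100, 105)          == 1  (a slightly behind b)
 *   FENCE_LTE(105, 100)          == 0  (a is ahead of b)
 *   FENCE_LTE(MAXFENCE - 5, 10)  == 1  (b wrapped past MAXFENCE;
 *                                       MAXFENCE - a + b = 15 < 2^24)
 */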

void
drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
					 unsigned int (*emit) (void *priv),
					 void (*wait) (unsigned int fence,
						       void *priv),
					 void *priv)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	bufmgr_fake->fence_emit = emit;
	bufmgr_fake->fence_wait = wait;
	bufmgr_fake->fence_priv = priv;
}
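
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a driver that manages its own IRQ-based fencing plugs its emit/wait hooks
 * in right after creating the fake bufmgr, and they are then used instead
 * of the DRM_I915_IRQ_* ioctls below.  my_driver_emit_irq/my_driver_wait_irq
 * are placeholders for whatever the driver actually provides.
 *
 *   static unsigned int my_fence_emit(void *priv)
 *   {
 *           struct my_driver *drv = priv;
 *           return my_driver_emit_irq(drv);  // returns the new cookie
 *   }
 *
 *   static void my_fence_wait(unsigned int fence, void *priv)
 *   {
 *           struct my_driver *drv = priv;
 *           my_driver_wait_irq(drv, fence);
 *   }
 *
 *   drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_fence_emit,
 *                                            my_fence_wait, drv);
 */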

static unsigned int
_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct drm_i915_irq_emit ie;
	int ret, seq = 1;

	if (bufmgr_fake->fence_emit != NULL) {
		seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
		return seq;
	}

	ie.irq_seq = &seq;
	ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		drmMsg("%s: drm_i915_irq_emit: %d\n", __func__, ret);
		abort();
	}

	DBG("emit 0x%08x\n", seq);
	return seq;
}

static void
_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
	struct drm_i915_irq_wait iw;
	int hw_seq, busy_count = 0;
	int ret;
	int kernel_lied;

	if (bufmgr_fake->fence_wait != NULL) {
		bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
		clear_fenced(bufmgr_fake, seq);
		return;
	}

	iw.irq_seq = seq;

	DBG("wait 0x%08x\n", iw.irq_seq);

	/* The kernel IRQ_WAIT implementation is all sorts of broken.
	 * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
	 *    unsigned range.
	 * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
	 *    signed range.
	 * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
	 *    signed range.
	 * 4) It returns -EBUSY in 3 seconds even if the hardware is still
	 *    successfully chewing through buffers.
	 *
	 * Assume that in userland we treat sequence numbers as ints, which
	 * makes some of the comparisons convenient, since the sequence
	 * numbers are all positive signed integers.
	 *
	 * From this we get several cases we need to handle.  Here's a timeline.
	 * 0x2   0x7                                    0x7ffffff8   0x7ffffffd
	 *  |     |                                          |            |
	 * ------------------------------------------------------------
	 *
	 * A) Normal wait for hw to catch up
	 *    hw_seq seq
	 *      |     |
	 * ------------------------------------------------------------
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
	 *    catch up.
	 *
	 * B) Normal wait for a sequence number that's already passed.
	 *    seq    hw_seq
	 *     |      |
	 * ------------------------------------------------------------
	 *    seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
	 *
	 * C) Hardware has already wrapped around ahead of us
	 *    hw_seq                                                  seq
	 *      |                                                      |
	 * ------------------------------------------------------------
	 *    seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would
	 *    wait for hw_seq >= seq, which may never occur.  Thus, we want
	 *    to catch this in userland and return 0.
	 *
	 * D) We've wrapped around ahead of the hardware.
	 *    seq                                                   hw_seq
	 *     |                                                      |
	 * ------------------------------------------------------------
	 *    seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it
	 *    would return 0 quickly because hw_seq >= seq, even though the
	 *    hardware isn't caught up.  Thus, we need to catch this early
	 *    return in userland and bother the kernel until the hardware
	 *    really does catch up.
	 *
	 * E) Hardware might wrap after we test in userland.
	 *                                                  hw_seq seq
	 *                                                     |     |
	 * ------------------------------------------------------------
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see
	 *    seq >= hw_seq and wait.  However, suppose hw_seq wraps before
	 *    we make it into the kernel.  The kernel sees hw_seq >= seq and
	 *    waits for 3 seconds then returns -EBUSY.  This is case C).
	 *    We should catch this and then return successfully.
	 *
	 * F) Hardware might take a long time on a buffer.
	 *    hw_seq seq
	 *      |     |
	 * -------------------------------------------------------------------
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, if sequence 2 through 5
	 *    take too long, it will return -EBUSY.  Batchbuffers in the
	 *    gltestperf demo were seen to take up to 7 seconds.  We should
	 *    catch early -EBUSY return and keep trying.
	 */

	do {
		/* Keep a copy of last_dispatch so that if the wait -EBUSYs
		 * because the hardware didn't catch up in 3 seconds, we can
		 * see if it at least made progress and retry.
		 */
		hw_seq = *bufmgr_fake->last_dispatch;

		/* Catch case C */
		if (seq - hw_seq > 0x40000000)
			return;

		ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
				      &iw, sizeof(iw));
		/* Catch case D */
		kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
					     -0x40000000);

		/* Catch case E */
		if (ret == -EBUSY
		    && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
			ret = 0;

		/* Catch case F: Allow up to 15 seconds chewing on one buffer. */
		if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
			busy_count = 0;
		else
			busy_count++;
	} while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
		 (ret == -EBUSY && busy_count < 5));

	if (ret != 0) {
		drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
		       __LINE__, strerror(-ret));
		abort();
	}
	clear_fenced(bufmgr_fake, seq);
}
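
/*
 * Concrete arithmetic for the 0x40000000 guards above (illustrative values):
 * with seq = 0x7ffffff0 and *last_dispatch = 0x10, seq - hw_seq = 0x7fffffe0
 * > 0x40000000, so case C is detected and we return instead of waiting for a
 * wrap the kernel may never report.  Conversely, seq = 0x10 against hw_seq =
 * 0x7ffffff0 gives seq - hw_seq = -0x7fffffe0 < -0x40000000: the kernel's
 * early 0 return is treated as a lie (kernel_lied) and we keep polling.
 */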

static int
_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
	/* Slight problem with wrap-around:
	 */
	return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
}

/**
 * Allocate a memory manager block for the buffer.
 */
static int
alloc_block(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	struct block *block = (struct block *)calloc(sizeof *block, 1);
	unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
	unsigned int sz;
	if (!block)
		return 0;	/* out of memory: report failure, not success */

	sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);

	block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
	if (!block->mem) {
		free(block);
		return 0;
	}

	DRMINITLISTHEAD(block);

	/* Insert at head or at tail??? */
	DRMLISTADDTAIL(block, &bufmgr_fake->lru);

	block->virtual = (uint8_t *) bufmgr_fake->virtual +
	    block->mem->ofs - bufmgr_fake->low_offset;
	block->bo = bo;

	bo_fake->block = block;

	return 1;
}
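
/*
 * A worked example of the round-up above (illustrative numbers): with
 * bo->size = 4097 and alignment = 4096, sz = (4097 + 4095) & ~4095 = 8192,
 * and align_log2 = ffs(4096) - 1 = 12, which is the form mmAllocMem takes.
 * This only works because alignment is constrained to a power of two at
 * allocation time.
 */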

/* Release the card storage associated with buf:
 */
static void
free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
	   int skip_dirty_copy)
{
	drm_intel_bo_fake *bo_fake;

	if (!block)
		return;

	/* The NULL check must come before this DBG, which dereferences block. */
	DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
	    block->on_hardware, block->fenced);

	bo_fake = (drm_intel_bo_fake *) block->bo;

	if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
		skip_dirty_copy = 1;

	if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
		memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
		bo_fake->card_dirty = 0;
		bo_fake->dirty = 1;
	}

	if (block->on_hardware) {
		block->bo = NULL;
	} else if (block->fenced) {
		block->bo = NULL;
	} else {
		DBG("   - free immediately\n");
		DRMLISTDEL(block);

		mmFreeMem(block->mem);
		free(block);
	}
}

static void
alloc_backing_store(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	assert(!bo_fake->backing_store);
	assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));

	bo_fake->backing_store = malloc(bo->size);

	DBG("alloc_backing - buf %d %p %lu\n", bo_fake->id,
	    bo_fake->backing_store, bo->size);
	assert(bo_fake->backing_store);
}

static void
free_backing_store(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	if (bo_fake->backing_store) {
		assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
		free(bo_fake->backing_store);
		bo_fake->backing_store = NULL;
	}
}

static void
set_dirty(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	if (bo_fake->flags & BM_NO_BACKING_STORE
	    && bo_fake->invalidate_cb != NULL)
		bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);

	assert(!(bo_fake->flags & BM_PINNED));

	DBG("set_dirty - buf %d\n", bo_fake->id);
	bo_fake->dirty = 1;
}

static int
evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
	struct block *block, *tmp;

	DBG("%s\n", __func__);

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

		if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
			continue;

		if (block->fence && max_fence && !FENCE_LTE(block->fence,
							    max_fence))
			return 0;

		set_dirty(&bo_fake->bo);
		bo_fake->block = NULL;

		free_block(bufmgr_fake, block, 0);
		return 1;
	}

	return 0;
}

static int
evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct block *block, *tmp;

	DBG("%s\n", __func__);

	DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

		if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
			continue;

		set_dirty(&bo_fake->bo);
		bo_fake->block = NULL;

		free_block(bufmgr_fake, block, 0);
		return 1;
	}

	return 0;
}

/**
 * Removes all objects from the fenced list older than the given fence.
 */
static int
clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
{
	struct block *block, *tmp;
	int ret = 0;

	bufmgr_fake->last_fence = fence_cookie;
	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
		assert(block->fenced);

		if (_fence_test(bufmgr_fake, block->fence)) {

			block->fenced = 0;

			if (!block->bo) {
				DBG("delayed free: offset %x sz %x\n",
				    block->mem->ofs, block->mem->size);
				DRMLISTDEL(block);
				mmFreeMem(block->mem);
				free(block);
			} else {
				DBG("return to lru: offset %x sz %x\n",
				    block->mem->ofs, block->mem->size);
				DRMLISTDEL(block);
				DRMLISTADDTAIL(block, &bufmgr_fake->lru);
			}

			ret = 1;
		} else {
			/* Blocks are ordered by fence, so if one fails, all
			 * from here will fail also:
			 */
			DBG("fence not passed: offset %x sz %x %d %d\n",
			    block->mem->ofs, block->mem->size, block->fence,
			    bufmgr_fake->last_fence);
			break;
		}
	}

	DBG("%s: %d\n", __func__, ret);
	return ret;
}

static void
fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
	struct block *block, *tmp;

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
		DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
		    block, block->mem->size, block->mem->ofs, block->bo, fence);
		block->fence = fence;

		block->on_hardware = 0;
		block->fenced = 1;

		/* Move to tail of pending list here
		 */
		DRMLISTDEL(block);
		DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
	}

	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}

static int
evict_and_alloc_block(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	assert(bo_fake->block == NULL);

	/* Search for already free memory:
	 */
	if (alloc_block(bo))
		return 1;

	/* If we're not thrashing, allow lru eviction to dig deeper into
	 * recently used textures.  We'll probably be thrashing soon:
	 */
	if (!bufmgr_fake->thrashing) {
		while (evict_lru(bufmgr_fake, 0))
			if (alloc_block(bo))
				return 1;
	}

	/* Keep thrashing counter alive?
	 */
	if (bufmgr_fake->thrashing)
		bufmgr_fake->thrashing = 20;

	/* Wait on any already pending fences - here we are waiting for any
	 * freed memory that has been submitted to hardware and fenced to
	 * become available:
	 */
	while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
		uint32_t fence = bufmgr_fake->fenced.next->fence;
		_fence_wait_internal(bufmgr_fake, fence);

		if (alloc_block(bo))
			return 1;
	}

	if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
		while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
			uint32_t fence = bufmgr_fake->fenced.next->fence;
			_fence_wait_internal(bufmgr_fake, fence);
		}

		if (!bufmgr_fake->thrashing) {
			DBG("thrashing\n");
		}
		bufmgr_fake->thrashing = 20;

		if (alloc_block(bo))
			return 1;
	}

	while (evict_mru(bufmgr_fake))
		if (alloc_block(bo))
			return 1;

	DBG("%s 0x%lx bytes failed\n", __func__, bo->size);

	return 0;
}

/***********************************************************************
 * Public functions
 */

/**
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
	unsigned int cookie;

	cookie = _fence_emit_internal(bufmgr_fake);
	_fence_wait_internal(bufmgr_fake, cookie);
}

/**
 * Wait for rendering to a buffer to complete.
 *
 * It is assumed that the batchbuffer which performed the rendering included
 * the necessary flushing.
 */
static void
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	if (bo_fake->block == NULL || !bo_fake->block->fenced)
		return;

	_fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}

static void
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;

	pthread_mutex_lock(&bufmgr_fake->lock);
	drm_intel_fake_bo_wait_rendering_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);
}

/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
	struct block *block, *tmp;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->need_fence = 1;
	bufmgr_fake->fail = 0;

	/* Wait for hardware idle.  We don't know where acceleration has been
	 * happening, so we'll need to wait anyway before letting anything get
	 * put on the card again.
	 */
	drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

	/* Check that we hadn't released the lock without having fenced the
	 * last set of buffers.
	 */
	assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		assert(_fence_test(bufmgr_fake, block->fence));
		set_dirty(block->bo);
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);
}

static drm_intel_bo *
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
			const char *name,
			unsigned long size,
			unsigned int alignment)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake;

	bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	assert(size != 0);

	bo_fake = calloc(1, sizeof(*bo_fake));
	if (!bo_fake)
		return NULL;

	bo_fake->bo.size = size;
	bo_fake->bo.offset = -1;
	bo_fake->bo.virtual = NULL;
	bo_fake->bo.bufmgr = bufmgr;
	bo_fake->refcount = 1;

	/* Alignment must be a power of two */
	assert((alignment & (alignment - 1)) == 0);
	if (alignment == 0)
		alignment = 1;
	bo_fake->alignment = alignment;
	bo_fake->id = ++bufmgr_fake->buf_nr;
	bo_fake->name = name;
	bo_fake->flags = 0;
	bo_fake->is_static = 0;

	DBG("drm_bo_alloc: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	return &bo_fake->bo;
}

static drm_intel_bo *
drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
			      const char *name,
			      int x, int y, int cpp,
			      uint32_t *tiling_mode,
			      unsigned long *pitch,
			      unsigned long flags)
{
	unsigned long stride, aligned_y;

	/* No runtime tiling support for fake. */
	*tiling_mode = I915_TILING_NONE;

	/* Align it for being a render target.  Shouldn't need anything else. */
	stride = x * cpp;
	stride = ROUND_UP_TO(stride, 64);

	/* 965 subspan loading alignment */
	aligned_y = ALIGN(y, 2);

	*pitch = stride;

	return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
				       4096);
}
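
/*
 * A worked sizing example for the helper above (illustrative numbers): a
 * 1020x31 RGBA surface (cpp = 4) gives stride = 4080, rounded up to 4096;
 * aligned_y = ALIGN(31, 2) = 32; so the underlying allocation is
 * 4096 * 32 = 128 KB, returned untiled with *pitch = 4096.
 */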

drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
			       const char *name,
			       unsigned long offset,
			       unsigned long size, void *virtual)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake;

	bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	assert(size != 0);

	bo_fake = calloc(1, sizeof(*bo_fake));
	if (!bo_fake)
		return NULL;

	bo_fake->bo.size = size;
	bo_fake->bo.offset = offset;
	bo_fake->bo.virtual = virtual;
	bo_fake->bo.bufmgr = bufmgr;
	bo_fake->refcount = 1;
	bo_fake->id = ++bufmgr_fake->buf_nr;
	bo_fake->name = name;
	bo_fake->flags = BM_PINNED;
	bo_fake->is_static = 1;

	DBG("drm_bo_alloc_static: (buf %d: %s, %lu kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	return &bo_fake->bo;
}
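
/*
 * A minimal usage sketch (hypothetical driver code): static BOs are how a
 * driver wraps fixed aperture ranges such as the front buffer, which are
 * already mapped and must never be evicted (BM_PINNED).  front_offset,
 * front_size and aperture_virtual stand in for values the driver would
 * read from its own screen setup.
 *
 *   drm_intel_bo *front_bo =
 *           drm_intel_bo_fake_alloc_static(bufmgr, "front",
 *                                          front_offset, front_size,
 *                                          aperture_virtual + front_offset);
 */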
898
899static void
Eric Anholt4b982642008-10-30 09:33:07 -0700900drm_intel_fake_bo_reference(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -0700901{
Eric Anholtd70d6052009-10-06 12:40:42 -0700902 drm_intel_bufmgr_fake *bufmgr_fake =
903 (drm_intel_bufmgr_fake *) bo->bufmgr;
904 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
Eric Anholt6df7b072008-06-12 23:22:26 -0700905
Eric Anholtd70d6052009-10-06 12:40:42 -0700906 pthread_mutex_lock(&bufmgr_fake->lock);
907 bo_fake->refcount++;
908 pthread_mutex_unlock(&bufmgr_fake->lock);
Eric Anholt6df7b072008-06-12 23:22:26 -0700909}
910
911static void
Eric Anholt4b982642008-10-30 09:33:07 -0700912drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
Eric Anholt6df7b072008-06-12 23:22:26 -0700913{
Eric Anholtd70d6052009-10-06 12:40:42 -0700914 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -0700915
Eric Anholtd70d6052009-10-06 12:40:42 -0700916 bo_fake->refcount++;
Eric Anholt6a9eb082008-06-03 09:27:37 -0700917}
918
919static void
Eric Anholt4b982642008-10-30 09:33:07 -0700920drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -0700921{
Eric Anholtd70d6052009-10-06 12:40:42 -0700922 drm_intel_bufmgr_fake *bufmgr_fake =
923 (drm_intel_bufmgr_fake *) bo->bufmgr;
924 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
925 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -0700926
Eric Anholtd70d6052009-10-06 12:40:42 -0700927 if (--bo_fake->refcount == 0) {
928 assert(bo_fake->map_count == 0);
929 /* No remaining references, so free it */
930 if (bo_fake->block)
931 free_block(bufmgr_fake, bo_fake->block, 1);
932 free_backing_store(bo);
Eric Anholt6a9eb082008-06-03 09:27:37 -0700933
Eric Anholtd70d6052009-10-06 12:40:42 -0700934 for (i = 0; i < bo_fake->nr_relocs; i++)
935 drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
936 target_buf);
Eric Anholt6a9eb082008-06-03 09:27:37 -0700937
Eric Anholtd70d6052009-10-06 12:40:42 -0700938 DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
939 bo_fake->name);
Eric Anholt6a9eb082008-06-03 09:27:37 -0700940
Eric Anholtd70d6052009-10-06 12:40:42 -0700941 free(bo_fake->relocs);
942 free(bo);
943 }
Eric Anholt6a9eb082008-06-03 09:27:37 -0700944}
945
Eric Anholt6df7b072008-06-12 23:22:26 -0700946static void
Eric Anholt4b982642008-10-30 09:33:07 -0700947drm_intel_fake_bo_unreference(drm_intel_bo *bo)
Eric Anholt6df7b072008-06-12 23:22:26 -0700948{
Eric Anholtd70d6052009-10-06 12:40:42 -0700949 drm_intel_bufmgr_fake *bufmgr_fake =
950 (drm_intel_bufmgr_fake *) bo->bufmgr;
Eric Anholt6df7b072008-06-12 23:22:26 -0700951
Eric Anholtd70d6052009-10-06 12:40:42 -0700952 pthread_mutex_lock(&bufmgr_fake->lock);
953 drm_intel_fake_bo_unreference_locked(bo);
954 pthread_mutex_unlock(&bufmgr_fake->lock);
Eric Anholt6df7b072008-06-12 23:22:26 -0700955}
956
Eric Anholt6a9eb082008-06-03 09:27:37 -0700957/**
958 * Set the buffer as not requiring backing store, and instead get the callback
959 * invoked whenever it would be set dirty.
960 */
Emil Velikov0f8da822015-03-31 22:32:11 +0100961void
Eric Anholtd70d6052009-10-06 12:40:42 -0700962drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
963 void (*invalidate_cb) (drm_intel_bo *bo,
964 void *ptr),
965 void *ptr)
Eric Anholt6a9eb082008-06-03 09:27:37 -0700966{
Eric Anholtd70d6052009-10-06 12:40:42 -0700967 drm_intel_bufmgr_fake *bufmgr_fake =
968 (drm_intel_bufmgr_fake *) bo->bufmgr;
969 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -0700970
Eric Anholtd70d6052009-10-06 12:40:42 -0700971 pthread_mutex_lock(&bufmgr_fake->lock);
Eric Anholt6df7b072008-06-12 23:22:26 -0700972
Eric Anholtd70d6052009-10-06 12:40:42 -0700973 if (bo_fake->backing_store)
974 free_backing_store(bo);
Eric Anholt6a9eb082008-06-03 09:27:37 -0700975
Eric Anholtd70d6052009-10-06 12:40:42 -0700976 bo_fake->flags |= BM_NO_BACKING_STORE;
Eric Anholt6a9eb082008-06-03 09:27:37 -0700977
Eric Anholtd70d6052009-10-06 12:40:42 -0700978 DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
979 bo_fake->dirty = 1;
980 bo_fake->invalidate_cb = invalidate_cb;
981 bo_fake->invalidate_ptr = ptr;
Eric Anholt6a9eb082008-06-03 09:27:37 -0700982
Eric Anholtd70d6052009-10-06 12:40:42 -0700983 /* Note that it is invalid right from the start. Also note
984 * invalidate_cb is called with the bufmgr locked, so cannot
985 * itself make bufmgr calls.
986 */
987 if (invalidate_cb != NULL)
988 invalidate_cb(bo, ptr);
Eric Anholt6df7b072008-06-12 23:22:26 -0700989
Eric Anholtd70d6052009-10-06 12:40:42 -0700990 pthread_mutex_unlock(&bufmgr_fake->lock);
Eric Anholt6a9eb082008-06-03 09:27:37 -0700991}
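
/*
 * A minimal usage sketch (hypothetical client code): a client that can
 * regenerate a buffer's contents on demand - e.g. a scratch buffer - can
 * skip the malloc'd backing store and simply be told when the contents are
 * lost to eviction.  Note the callback must not call back into the bufmgr.
 *
 *   static void scratch_invalidate(drm_intel_bo *bo, void *ptr)
 *   {
 *           struct my_scratch_state *s = ptr;
 *           s->contents_valid = 0;  // regenerate before next use
 *   }
 *
 *   drm_intel_bo_fake_disable_backing_store(bo, scratch_invalidate, s);
 */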

/**
 * Map a buffer into bo->virtual, allocating either card memory space (if
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	/* Static buffers are always mapped. */
	if (bo_fake->is_static) {
		if (bo_fake->card_dirty) {
			drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
			bo_fake->card_dirty = 0;
		}
		return 0;
	}

	/* Allow recursive mapping.  Mesa may recursively map buffers with
	 * nested display loops, and it is used internally in bufmgr_fake
	 * for relocation.
	 */
	if (bo_fake->map_count++ != 0)
		return 0;

	{
		DBG("drm_bo_map: (buf %d: %s, %lu kb)\n", bo_fake->id,
		    bo_fake->name, bo_fake->bo.size / 1024);

		if (bo->virtual != NULL) {
			drmMsg("%s: already mapped\n", __func__);
			abort();
		} else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {

			if (!bo_fake->block && !evict_and_alloc_block(bo)) {
				DBG("%s: alloc failed\n", __func__);
				bufmgr_fake->fail = 1;
				return 1;
			} else {
				assert(bo_fake->block);
				bo_fake->dirty = 0;

				if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
				    bo_fake->block->fenced) {
					drm_intel_fake_bo_wait_rendering_locked
					    (bo);
				}

				bo->virtual = bo_fake->block->virtual;
			}
		} else {
			if (write_enable)
				set_dirty(bo);

			if (bo_fake->backing_store == 0)
				alloc_backing_store(bo);

			if ((bo_fake->card_dirty == 1) && bo_fake->block) {
				if (bo_fake->block->fenced)
					drm_intel_fake_bo_wait_rendering_locked
					    (bo);

				memcpy(bo_fake->backing_store,
				       bo_fake->block->virtual,
				       bo_fake->block->bo->size);
				bo_fake->card_dirty = 0;
			}

			bo->virtual = bo_fake->backing_store;
		}
	}

	return 0;
}

static int
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	int ret;

	pthread_mutex_lock(&bufmgr_fake->lock);
	ret = drm_intel_fake_bo_map_locked(bo, write_enable);
	pthread_mutex_unlock(&bufmgr_fake->lock);

	return ret;
}

static int
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	/* Static buffers are always mapped. */
	if (bo_fake->is_static)
		return 0;

	assert(bo_fake->map_count != 0);
	if (--bo_fake->map_count != 0)
		return 0;

	DBG("drm_bo_unmap: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	bo->virtual = NULL;

	return 0;
}

static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	int ret;

	pthread_mutex_lock(&bufmgr_fake->lock);
	ret = drm_intel_fake_bo_unmap_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);

	return ret;
}

static int
drm_intel_fake_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			  unsigned long size, const void *data)
{
	int ret;

	if (size == 0 || data == NULL)
		return 0;

	ret = drm_intel_bo_map(bo, 1);
	if (ret)
		return ret;
	memcpy((unsigned char *)bo->virtual + offset, data, size);
	drm_intel_bo_unmap(bo);
	return 0;
}
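
/*
 * A usage sketch for the helper above (hypothetical caller code): subdata
 * is just a map/memcpy/unmap convenience, reached through the public
 * drm_intel_bo_subdata() entry point, e.g. for a small constant upload:
 *
 *   static const float consts[4] = { 0.0f, 0.5f, 1.0f, 2.0f };
 *   drm_intel_bo_subdata(bo, 0, sizeof(consts), consts);
 */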

static void
drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct block *block, *tmp;

	bufmgr_fake->performed_rendering = 0;
	/* Okay, for every BO that is on the HW, kick it off.
	   Seriously not afraid of the POLICE right now. */
	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

		block->on_hardware = 0;
		free_block(bufmgr_fake, block, 0);
		bo_fake->block = NULL;
		bo_fake->validated = 0;
		if (!(bo_fake->flags & BM_NO_BACKING_STORE))
			bo_fake->dirty = 1;
	}

}

static int
drm_intel_fake_bo_validate(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;

	DBG("drm_bo_validate: (buf %d: %s, %lu kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	/* Sanity check: Buffers should be unmapped before being validated.
	 * This is not so much of a problem for bufmgr_fake, but TTM refuses,
	 * and the problem is harder to debug there.
	 */
	assert(bo_fake->map_count == 0);

	if (bo_fake->is_static) {
		/* Add it to the needs-fence list */
		bufmgr_fake->need_fence = 1;
		return 0;
	}

	/* Allocate the card memory */
	if (!bo_fake->block && !evict_and_alloc_block(bo)) {
		bufmgr_fake->fail = 1;
		DBG("Failed to validate buf %d:%s\n", bo_fake->id,
		    bo_fake->name);
		return -1;
	}

	assert(bo_fake->block);
	assert(bo_fake->block->bo == &bo_fake->bo);

	bo->offset = bo_fake->block->mem->ofs;

	/* Upload the buffer contents if necessary */
	if (bo_fake->dirty) {
		DBG("Upload dirty buf %d:%s, sz %lu offset 0x%x\n", bo_fake->id,
		    bo_fake->name, bo->size, bo_fake->block->mem->ofs);

		assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));

		/* Actually, should be able to just wait for a fence on the
		 * memory, which we would be tracking when we free it. Waiting
		 * for idle is a sufficiently large hammer for now.
		 */
1205 drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001206
Eric Anholtd70d6052009-10-06 12:40:42 -07001207 /* we may never have mapped this BO so it might not have any
1208 * backing store if this happens it should be rare, but 0 the
1209 * card memory in any case */
1210 if (bo_fake->backing_store)
1211 memcpy(bo_fake->block->virtual, bo_fake->backing_store,
1212 bo->size);
1213 else
1214 memset(bo_fake->block->virtual, 0, bo->size);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001215
Eric Anholtd70d6052009-10-06 12:40:42 -07001216 bo_fake->dirty = 0;
1217 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001218
Eric Anholtd70d6052009-10-06 12:40:42 -07001219 bo_fake->block->fenced = 0;
1220 bo_fake->block->on_hardware = 1;
1221 DRMLISTDEL(bo_fake->block);
1222 DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001223
Eric Anholtd70d6052009-10-06 12:40:42 -07001224 bo_fake->validated = 1;
1225 bufmgr_fake->need_fence = 1;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001226
Eric Anholtd70d6052009-10-06 12:40:42 -07001227 return 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001228}
1229
static void
drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
        unsigned int cookie;

        cookie = _fence_emit_internal(bufmgr_fake);
        fence_blocks(bufmgr_fake, cookie);

        DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
}

static void
drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

        pthread_mutex_destroy(&bufmgr_fake->lock);
        mmDestroy(bufmgr_fake->heap);
        free(bufmgr);
}

static int
drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                          drm_intel_bo *target_bo, uint32_t target_offset,
                          uint32_t read_domains, uint32_t write_domain)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        struct fake_buffer_reloc *r;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
        int i;

        pthread_mutex_lock(&bufmgr_fake->lock);

        assert(bo);
        assert(target_bo);

        if (bo_fake->relocs == NULL) {
                bo_fake->relocs =
                    malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
                if (bo_fake->relocs == NULL) {
                        pthread_mutex_unlock(&bufmgr_fake->lock);
                        return -ENOMEM;
                }
        }

        r = &bo_fake->relocs[bo_fake->nr_relocs++];

        assert(bo_fake->nr_relocs <= MAX_RELOCS);

        drm_intel_fake_bo_reference_locked(target_bo);

        if (!target_fake->is_static) {
                bo_fake->child_size +=
                    ALIGN(target_bo->size, target_fake->alignment);
                bo_fake->child_size += target_fake->child_size;
        }
        r->target_buf = target_bo;
        r->offset = offset;
        r->last_target_offset = target_bo->offset;
        r->delta = target_offset;
        r->read_domains = read_domains;
        r->write_domain = write_domain;

        if (bufmgr_fake->debug) {
                /* Check that a conflicting relocation hasn't already been
                 * emitted.
                 */
                for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
                        struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];

                        assert(r->offset != r2->offset);
                }
        }

        pthread_mutex_unlock(&bufmgr_fake->lock);

        return 0;
}

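/* A minimal usage sketch of the relocation interface above, driven through
 * the public drm_intel_bo_emit_reloc() wrapper.  The choice of a sampler
 * read domain and a zero delta is an illustrative assumption, not taken
 * from any particular driver.
 */
static int
example_emit_surface_reloc(drm_intel_bo *batch, drm_intel_bo *surface,
                           uint32_t reloc_offset)
{
        /* Record that the dword at reloc_offset within the batch must hold
         * the surface's final card address; the value is patched in later
         * by drm_intel_fake_reloc_and_validate_buffer() once the surface
         * has a block assigned.
         */
        return drm_intel_bo_emit_reloc(batch, reloc_offset,
                                       surface, 0,
                                       I915_GEM_DOMAIN_SAMPLER, 0);
}
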
/**
 * Incorporates the read and write domains associated with each relocation
 * into the combined domains of the target buffers for this batchbuffer
 * submission.
 */
static void
drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        int i;

        for (i = 0; i < bo_fake->nr_relocs; i++) {
                struct fake_buffer_reloc *r = &bo_fake->relocs[i];
                drm_intel_bo_fake *target_fake =
                    (drm_intel_bo_fake *) r->target_buf;

                /* Do the same for the tree of buffers we depend on */
                drm_intel_fake_calculate_domains(r->target_buf);

                target_fake->read_domains |= r->read_domains;
                target_fake->write_domain |= r->write_domain;
        }
}

static int
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        int i, ret;

        assert(bo_fake->map_count == 0);

        for (i = 0; i < bo_fake->nr_relocs; i++) {
                struct fake_buffer_reloc *r = &bo_fake->relocs[i];
                drm_intel_bo_fake *target_fake =
                    (drm_intel_bo_fake *) r->target_buf;
                uint32_t reloc_data;

                /* Validate the target buffer if that hasn't been done. */
                if (!target_fake->validated) {
                        ret =
                            drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
                        if (ret != 0) {
                                if (bo->virtual != NULL)
                                        drm_intel_fake_bo_unmap_locked(bo);
                                return ret;
                        }
                }

                /* Calculate the value of the relocation entry. */
                if (r->target_buf->offset != r->last_target_offset) {
                        reloc_data = r->target_buf->offset + r->delta;

                        if (bo->virtual == NULL)
                                drm_intel_fake_bo_map_locked(bo, 1);

                        *(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
                            reloc_data;

                        r->last_target_offset = r->target_buf->offset;
                }
        }

        if (bo->virtual != NULL)
                drm_intel_fake_bo_unmap_locked(bo);

        if (bo_fake->write_domain != 0) {
                if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
                        if (bo_fake->backing_store == 0)
                                alloc_backing_store(bo);
                }
                bo_fake->card_dirty = 1;
                bufmgr_fake->performed_rendering = 1;
        }

        return drm_intel_fake_bo_validate(bo);
}

static void
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        int i;

        for (i = 0; i < bo_fake->nr_relocs; i++) {
                struct fake_buffer_reloc *r = &bo_fake->relocs[i];
                drm_intel_bo_fake *target_fake =
                    (drm_intel_bo_fake *) r->target_buf;

                if (target_fake->validated)
                        drm_intel_bo_fake_post_submit(r->target_buf);

                DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
                    bo_fake->name, (uint32_t) bo->offset, r->offset,
                    target_fake->name, (uint32_t) r->target_buf->offset,
                    r->delta);
        }

        assert(bo_fake->map_count == 0);
        bo_fake->validated = 0;
        bo_fake->read_domains = 0;
        bo_fake->write_domain = 0;
}

void
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
                                        int (*exec) (drm_intel_bo *bo,
                                                     unsigned int used,
                                                     void *priv),
                                        void *priv)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

        bufmgr_fake->exec = exec;
        bufmgr_fake->exec_priv = priv;
}

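/* Sketch of an exec callback of the shape installed above, assuming a
 * hypothetical test harness that records where the batch landed instead
 * of submitting it to the kernel.  It would be registered with
 * drm_intel_bufmgr_fake_set_exec_callback(bufmgr, example_record_exec,
 * &rec); the example_* names and the record struct are illustrative.
 */
struct example_exec_record {
        unsigned long start;    /* validated card offset of the batch */
        unsigned int used;      /* bytes of the batch actually used */
};

static int
example_record_exec(drm_intel_bo *bo, unsigned int used, void *priv)
{
        struct example_exec_record *rec = priv;

        /* By the time the callback runs, drm_intel_fake_bo_exec() has
         * already relocated and validated the batch, so bo->offset is
         * its final card address.
         */
        rec->start = bo->offset;
        rec->used = used;
        return 0;       /* a nonzero value would propagate out of bo_exec */
}
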
static int
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
                       drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
        struct drm_i915_batchbuffer batch;
        int ret;
        int retry_count = 0;

        pthread_mutex_lock(&bufmgr_fake->lock);

        bufmgr_fake->performed_rendering = 0;

        drm_intel_fake_calculate_domains(bo);

        batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;

        /* If we've run out of RAM, blow the whole lot away and retry. */
restart:
        ret = drm_intel_fake_reloc_and_validate_buffer(bo);
        if (bufmgr_fake->fail == 1) {
                if (retry_count == 0) {
                        retry_count++;
                        drm_intel_fake_kick_all_locked(bufmgr_fake);
                        bufmgr_fake->fail = 0;
                        goto restart;
                } else {
                        /* Dump out the memory map for debugging. */
                        mmDumpMemInfo(bufmgr_fake->heap);
                }
        }

        assert(ret == 0);

        if (bufmgr_fake->exec != NULL) {
                ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
                if (ret != 0) {
                        pthread_mutex_unlock(&bufmgr_fake->lock);
                        return ret;
                }
        } else {
                batch.start = bo->offset;
                batch.used = used;
                batch.cliprects = cliprects;
                batch.num_cliprects = num_cliprects;
                batch.DR1 = 0;
                batch.DR4 = DR4;

                if (drmCommandWrite
                    (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
                     sizeof(batch))) {
                        drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
                        pthread_mutex_unlock(&bufmgr_fake->lock);
                        return -errno;
                }
        }

        drm_intel_fake_fence_validated(bo->bufmgr);

        drm_intel_bo_fake_post_submit(bo);

        pthread_mutex_unlock(&bufmgr_fake->lock);

        return 0;
}

/**
 * Return an error if the list of BOs will exceed the aperture size.
 *
 * This is a rough guess and likely to fail, as during the validate sequence
 * we may place a buffer in an inopportune spot early on and then fail to fit
 * a set smaller than the aperture.
 */
static int
drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
        unsigned int sz = 0;
        int i;

        for (i = 0; i < count; i++) {
                drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];

                if (bo_fake == NULL)
                        continue;

                if (!bo_fake->is_static)
                        sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
                sz += bo_fake->child_size;
        }

        if (sz > bufmgr_fake->size) {
                DBG("check_space: overflowed bufmgr size, %ukb vs %lukb\n",
                    sz / 1024, bufmgr_fake->size / 1024);
                return -1;
        }

        DBG("drm_check_space: sz %ukb vs bufmgr %lukb\n", sz / 1024,
            bufmgr_fake->size / 1024);
        return 0;
}

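/* Hedged example of the intended calling pattern for the check above:
 * before emitting commands that reference a set of buffers, ask whether
 * the set can plausibly fit, so the caller can flush and retry with a
 * smaller set instead of failing validation later.  The example_* name
 * is illustrative.
 */
static int
example_set_fits(drm_intel_bo *bo_array[], int count)
{
        /* Returns nonzero when the summed, aligned sizes (including each
         * buffer's relocation tree, via child_size) fit within the
         * managed region.
         */
        return drm_intel_bufmgr_check_aperture_space(bo_array, count) == 0;
}
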
/**
 * Evicts all buffers, waiting for fences to pass and copying contents out
 * as necessary.
 *
 * Used by the X Server on LeaveVT, when the card memory is no longer our
 * own.
 */
void
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
        struct block *block, *tmp;

        pthread_mutex_lock(&bufmgr_fake->lock);

        bufmgr_fake->need_fence = 1;
        bufmgr_fake->fail = 0;

        /* Wait for hardware idle.  We don't know where acceleration has been
         * happening, so we'll need to wait anyway before letting anything get
         * put on the card again.
         */
        drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

        /* Check that we haven't released the lock without having fenced the
         * last set of buffers.
         */
        assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
        assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

        DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
                drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
                /* Releases the memory, and memcpys dirty contents out if
                 * necessary.
                 */
                free_block(bufmgr_fake, block, 0);
                bo_fake->block = NULL;
        }

        pthread_mutex_unlock(&bufmgr_fake->lock);
}

void
drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
                                        volatile unsigned int *last_dispatch)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

        bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}

drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
                           void *low_virtual, unsigned long size,
                           volatile unsigned int *last_dispatch)
{
        drm_intel_bufmgr_fake *bufmgr_fake;

        bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
        if (bufmgr_fake == NULL)
                return NULL;

        if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
                free(bufmgr_fake);
                return NULL;
        }

        /* Initialize allocator */
        DRMINITLISTHEAD(&bufmgr_fake->fenced);
        DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
        DRMINITLISTHEAD(&bufmgr_fake->lru);

        bufmgr_fake->low_offset = low_offset;
        bufmgr_fake->virtual = low_virtual;
        bufmgr_fake->size = size;
        bufmgr_fake->heap = mmInit(low_offset, size);

        /* Hook in methods */
        bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
        bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
        bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
        bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
        bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
        bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
        bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
        bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
        bufmgr_fake->bufmgr.bo_wait_rendering =
            drm_intel_fake_bo_wait_rendering;
        bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
        bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
        bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
        bufmgr_fake->bufmgr.check_aperture_space =
            drm_intel_fake_check_aperture_space;
        bufmgr_fake->bufmgr.debug = 0;

        bufmgr_fake->fd = fd;
        bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;

        return &bufmgr_fake->bufmgr;
}
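
/* End-to-end sketch of bringing up and using the fake bufmgr, under
 * assumed values for the managed aperture range.  The example_* name is
 * illustrative, the two-dword batch is a placeholder (a real batch would
 * end with MI_BATCH_BUFFER_END), and error handling is elided beyond the
 * NULL checks.
 */
static int
example_fake_bufmgr_roundtrip(int fd, unsigned long aperture_offset,
                              void *aperture_virtual,
                              unsigned long aperture_size,
                              volatile unsigned int *hw_breadcrumb)
{
        drm_intel_bufmgr *bufmgr;
        drm_intel_bo *batch;
        uint32_t commands[2] = { 0, 0 };        /* placeholder contents */

        bufmgr = drm_intel_bufmgr_fake_init(fd, aperture_offset,
                                            aperture_virtual, aperture_size,
                                            hw_breadcrumb);
        if (bufmgr == NULL)
                return -1;

        batch = drm_intel_bo_alloc(bufmgr, "example batch",
                                   sizeof(commands), 4096);
        if (batch == NULL) {
                drm_intel_bufmgr_destroy(bufmgr);
                return -1;
        }

        /* Upload the commands and submit; bo_exec validates, relocates,
         * and fences the batch as implemented above.
         */
        drm_intel_bo_subdata(batch, 0, sizeof(commands), commands);
        drm_intel_bo_exec(batch, sizeof(commands), NULL, 0, 0);

        drm_intel_bo_unreference(batch);
        drm_intel_bufmgr_destroy(bufmgr);
        return 0;
}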