/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Originally a fake version of the buffer manager so that we could
 * prototype driver changes fairly quickly; it has since been fleshed
 * out into a fully functional interim solution.
 *
 * Basically it wraps the old-style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <strings.h>
#include <xf86drm.h>
#include <pthread.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
#include "libdrm_macros.h"
#include "libdrm_lists.h"

#define DBG(...) do {                                   \
        if (bufmgr_fake->bufmgr.debug)                  \
                drmMsg(__VA_ARGS__);                    \
} while (0)

/* Internal flags:
 */
#define BM_NO_BACKING_STORE     0x00000001
#define BM_NO_FENCE_SUBDATA     0x00000002
#define BM_PINNED               0x00000004

/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
#define MAX_RELOCS 4096

struct fake_buffer_reloc {
        /** Buffer object that the relocation points at. */
        drm_intel_bo *target_buf;
        /** Offset of the relocation entry within reloc_buf. */
        uint32_t offset;
        /**
         * Cached value of the offset when we last performed this relocation.
         */
        uint32_t last_target_offset;
        /** Value added to target_buf's offset to get the relocation entry. */
        uint32_t delta;
        /** Cache domains the target buffer is read into. */
        uint32_t read_domains;
        /** Cache domain the target buffer will have dirty cachelines in. */
        uint32_t write_domain;
};

struct block {
        struct block *next, *prev;
        struct mem_block *mem;  /* BM_MEM_AGP */

        /**
         * Marks that the block is currently in the aperture and has yet to be
         * fenced.
         */
        unsigned on_hardware:1;
        /**
         * Marks that the block is currently fenced (being used by rendering)
         * and can't be freed until @fence is passed.
         */
        unsigned fenced:1;

        /** Fence cookie for the block. */
        unsigned fence;         /* Split to read_fence, write_fence */

        drm_intel_bo *bo;
        void *virtual;
};

typedef struct _bufmgr_fake {
        drm_intel_bufmgr bufmgr;

        pthread_mutex_t lock;

        unsigned long low_offset;
        unsigned long size;
        void *virtual;

        struct mem_block *heap;

        unsigned buf_nr;        /* for generating ids */

        /**
         * List of blocks which are currently in the GART but haven't been
         * fenced yet.
         */
        struct block on_hardware;
        /**
         * List of blocks which are in the GART and have an active fence on
         * them.
         */
        struct block fenced;
        /**
         * List of blocks which have an expired fence and are ready to be
         * evicted.
         */
        struct block lru;

        unsigned int last_fence;

        unsigned fail:1;
        unsigned need_fence:1;
        int thrashing;

        /**
         * Driver callback to emit a fence, returning the cookie.
         *
         * This allows the driver to hook in a replacement for the DRM usage in
         * bufmgr_fake.
         *
         * Currently, this also requires that a write flush be emitted before
         * emitting the fence, but this should change.
         */
        unsigned int (*fence_emit) (void *private);
        /** Driver callback to wait for a fence cookie to have passed. */
        void (*fence_wait) (unsigned int fence, void *private);
        void *fence_priv;

        /**
         * Driver callback to execute a buffer.
         *
         * This allows the driver to hook in a replacement for the DRM usage in
         * bufmgr_fake.
         */
        int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
        void *exec_priv;

        /** Driver-supplied argument to driver callbacks */
        void *driver_priv;
        /**
         * Pointer to kernel-updated sarea data for the last completed user irq
         */
        volatile int *last_dispatch;

        int fd;

        int debug;

        int performed_rendering;
} drm_intel_bufmgr_fake;

typedef struct _drm_intel_bo_fake {
        drm_intel_bo bo;

        unsigned id;            /* debug only */
        const char *name;

        unsigned dirty:1;
        /**
         * Has the card written to this buffer - we may need to copy it back.
         */
        unsigned card_dirty:1;
        unsigned int refcount;
        /* Flags may consist of any of the DRM_BO flags, plus
         * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
         * first two driver private flags.
         */
        uint64_t flags;
        /** Cache domains the target buffer is read into. */
        uint32_t read_domains;
        /** Cache domain the target buffer will have dirty cachelines in. */
        uint32_t write_domain;

        unsigned int alignment;
        int is_static, validated;
        unsigned int map_count;

        /** relocation list */
        struct fake_buffer_reloc *relocs;
        int nr_relocs;
        /**
         * Total size of the target_bos of this buffer.
         *
         * Used for estimation in check_aperture.
         */
        unsigned int child_size;

        struct block *block;
        void *backing_store;
        void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
        void *invalidate_ptr;
} drm_intel_bo_fake;

static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
                        unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff

static int
FENCE_LTE(unsigned a, unsigned b)
{
        if (a == b)
                return 1;

        if (a < b && b - a < (1 << 24))
                return 1;

        if (a > b && MAXFENCE - a + b < (1 << 24))
                return 1;

        return 0;
}

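/* Worked example for the wraparound tolerance above (illustrative values,
 * not from the source): with a = 0x7ffffffe and b = 0x3, a > b and
 * MAXFENCE - a + b = 4 < (1 << 24), so FENCE_LTE() reports a <= b - the
 * counter is assumed to have wrapped, since the two cookies are within
 * 2^24 of each other.
 */
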
void
drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
                                         unsigned int (*emit) (void *priv),
                                         void (*wait) (unsigned int fence,
                                                       void *priv),
                                         void *priv)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

        bufmgr_fake->fence_emit = emit;
        bufmgr_fake->fence_wait = wait;
        bufmgr_fake->fence_priv = priv;
}

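/* Minimal sketch of how a driver hooks its own fencing; my_emit, my_wait
 * and my_ctx are hypothetical names:
 *
 *      drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_emit, my_wait,
 *                                               my_ctx);
 *
 * Once set, _fence_emit_internal() and _fence_wait_internal() below route
 * through these callbacks instead of the DRM_I915_IRQ_EMIT/IRQ_WAIT ioctls.
 */
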
static unsigned int
_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
        struct drm_i915_irq_emit ie;
        int ret, seq = 1;

        if (bufmgr_fake->fence_emit != NULL) {
                seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
                return seq;
        }

        ie.irq_seq = &seq;
        ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
                                  &ie, sizeof(ie));
        if (ret) {
                drmMsg("%s: drm_i915_irq_emit: %d\n", __func__, ret);
                abort();
        }

        DBG("emit 0x%08x\n", seq);
        return seq;
}

static void
_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
        struct drm_i915_irq_wait iw;
        int hw_seq, busy_count = 0;
        int ret;
        int kernel_lied;

        if (bufmgr_fake->fence_wait != NULL) {
                bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
                clear_fenced(bufmgr_fake, seq);
                return;
        }

        iw.irq_seq = seq;

        DBG("wait 0x%08x\n", iw.irq_seq);

        /* The kernel IRQ_WAIT implementation is all sorts of broken.
         * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
         *    unsigned range.
         * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the
         *    32-bit signed range.
         * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
         *    signed range.
         * 4) It returns -EBUSY in 3 seconds even if the hardware is still
         *    successfully chewing through buffers.
         *
         * Assume that in userland we treat sequence numbers as ints, which
         * makes some of the comparisons convenient, since the sequence
         * numbers are all positive signed integers.
         *
         * From this we get several cases we need to handle.  Here's a
         * timeline.
         * 0x2   0x7                                   0x7ffffff8   0x7ffffffd
         *   |    |                                             |       |
         * -------------------------------------------------------------------
         *
         * A) Normal wait for hw to catch up
         *    hw_seq seq
         *       |    |
         * -------------------------------------------------------------------
         *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
         *    catch up.
         *
         * B) Normal wait for a sequence number that's already passed.
         *    seq    hw_seq
         *     |      |
         * -------------------------------------------------------------------
         *    seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
         *
         * C) Hardware has already wrapped around ahead of us
         *    hw_seq                                                     seq
         *       |                                                        |
         * -------------------------------------------------------------------
         *    seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would
         *    wait for hw_seq >= seq, which may never occur.  Thus, we want to
         *    catch this in userland and return 0.
         *
         * D) We've wrapped around ahead of the hardware.
         *    seq                                                     hw_seq
         *     |                                                         |
         * -------------------------------------------------------------------
         *    seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it
         *    would return 0 quickly because hw_seq >= seq, even though the
         *    hardware isn't caught up.  Thus, we need to catch this early
         *    return in userland and bother the kernel until the hardware
         *    really does catch up.
         *
         * E) Hardware might wrap after we test in userland.
         *    hw_seq                                                     seq
         *       |                                                        |
         * -------------------------------------------------------------------
         *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see
         *    seq >= hw_seq and wait.  However, suppose hw_seq wraps before
         *    we make it into the kernel.  The kernel sees hw_seq >= seq and
         *    waits for 3 seconds then returns -EBUSY.  This is case C).  We
         *    should catch this and then return successfully.
         *
         * F) Hardware might take a long time on a buffer.
         *    hw_seq seq
         *       |    |
         * -------------------------------------------------------------------
         *    seq - hw_seq = 5.  If we call IRQ_WAIT and sequences 2 through 5
         *    take too long, it will return -EBUSY.  Batchbuffers in the
         *    gltestperf demo were seen to take up to 7 seconds.  We should
         *    catch early -EBUSY returns and keep trying.
         */

        do {
                /* Keep a copy of last_dispatch so that if the wait -EBUSYs
                 * because the hardware didn't catch up in 3 seconds, we can
                 * see if it at least made progress and retry.
                 */
                hw_seq = *bufmgr_fake->last_dispatch;

                /* Catch case C */
                if (seq - hw_seq > 0x40000000)
                        return;

                ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
                                      &iw, sizeof(iw));
                /* Catch case D */
                kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
                                             -0x40000000);

                /* Catch case E */
                if (ret == -EBUSY
                    && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
                        ret = 0;

                /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
                if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
                        busy_count = 0;
                else
                        busy_count++;
        } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
                 (ret == -EBUSY && busy_count < 5));

        if (ret != 0) {
                drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
                       __LINE__, strerror(-ret));
                abort();
        }
        clear_fenced(bufmgr_fake, seq);
}

static int
_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
        /* Slight problem with wrap-around:
         */
        return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
}

/**
 * Allocate a memory manager block for the buffer.
 */
static int
alloc_block(drm_intel_bo *bo)
{
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        struct block *block = (struct block *)calloc(sizeof *block, 1);
        unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
        unsigned int sz;

        if (!block)
                return 0;

        sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);

        block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
        if (!block->mem) {
                free(block);
                return 0;
        }

        DRMINITLISTHEAD(block);

        /* Insert at head or at tail??? */
        DRMLISTADDTAIL(block, &bufmgr_fake->lru);

        block->virtual = (uint8_t *) bufmgr_fake->virtual +
            block->mem->ofs - bufmgr_fake->low_offset;
        block->bo = bo;

        bo_fake->block = block;

        return 1;
}

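/* Illustrative numbers for the size rounding above (not from the source):
 * with bo->size = 5000 and bo_fake->alignment = 4096, sz becomes 8192 and
 * align_log2 is 12, so mm.c returns a page-aligned block spanning two
 * 4096-byte pages.
 */
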
/* Release the card storage associated with buf:
 */
static void
free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
           int skip_dirty_copy)
{
        drm_intel_bo_fake *bo_fake;

        if (!block)
                return;

        DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
            block->on_hardware, block->fenced);

        bo_fake = (drm_intel_bo_fake *) block->bo;

        if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
                skip_dirty_copy = 1;

        if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
                memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
                bo_fake->card_dirty = 0;
                bo_fake->dirty = 1;
        }

        if (block->on_hardware) {
                block->bo = NULL;
        } else if (block->fenced) {
                block->bo = NULL;
        } else {
                DBG("      - free immediately\n");
                DRMLISTDEL(block);

                mmFreeMem(block->mem);
                free(block);
        }
}

static void
alloc_backing_store(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        assert(!bo_fake->backing_store);
        assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));

        bo_fake->backing_store = malloc(bo->size);

        DBG("alloc_backing - buf %d %p %lu\n", bo_fake->id,
            bo_fake->backing_store, bo->size);
        assert(bo_fake->backing_store);
}

static void
free_backing_store(drm_intel_bo *bo)
{
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        if (bo_fake->backing_store) {
                assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
                free(bo_fake->backing_store);
                bo_fake->backing_store = NULL;
        }
}

static void
set_dirty(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        if (bo_fake->flags & BM_NO_BACKING_STORE
            && bo_fake->invalidate_cb != NULL)
                bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);

        assert(!(bo_fake->flags & BM_PINNED));

        DBG("set_dirty - buf %d\n", bo_fake->id);
        bo_fake->dirty = 1;
}

static int
evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
        struct block *block, *tmp;

        DBG("%s\n", __func__);

        DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
                drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

                if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
                        continue;

                if (block->fence && max_fence && !FENCE_LTE(block->fence,
                                                            max_fence))
                        return 0;

                set_dirty(&bo_fake->bo);
                bo_fake->block = NULL;

                free_block(bufmgr_fake, block, 0);
                return 1;
        }

        return 0;
}

static int
evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
        struct block *block, *tmp;

        DBG("%s\n", __func__);

        DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
                drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

                if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
                        continue;

                set_dirty(&bo_fake->bo);
                bo_fake->block = NULL;

                free_block(bufmgr_fake, block, 0);
                return 1;
        }

        return 0;
}

/**
 * Removes all objects from the fenced list older than the given fence.
 */
static int
clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
{
        struct block *block, *tmp;
        int ret = 0;

        bufmgr_fake->last_fence = fence_cookie;
        DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
                assert(block->fenced);

                if (_fence_test(bufmgr_fake, block->fence)) {

                        block->fenced = 0;

                        if (!block->bo) {
                                DBG("delayed free: offset %x sz %x\n",
                                    block->mem->ofs, block->mem->size);
                                DRMLISTDEL(block);
                                mmFreeMem(block->mem);
                                free(block);
                        } else {
                                DBG("return to lru: offset %x sz %x\n",
                                    block->mem->ofs, block->mem->size);
                                DRMLISTDEL(block);
                                DRMLISTADDTAIL(block, &bufmgr_fake->lru);
                        }

                        ret = 1;
                } else {
                        /* Blocks are ordered by fence, so if one fails, all
                         * from here will fail also:
                         */
                        DBG("fence not passed: offset %x sz %x %d %d\n",
                            block->mem->ofs, block->mem->size, block->fence,
                            bufmgr_fake->last_fence);
                        break;
                }
        }

        DBG("%s: %d\n", __func__, ret);
        return ret;
}

static void
fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
        struct block *block, *tmp;

        DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
                DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
                    block, block->mem->size, block->mem->ofs, block->bo, fence);
                block->fence = fence;

                block->on_hardware = 0;
                block->fenced = 1;

                /* Move to tail of pending list here
                 */
                DRMLISTDEL(block);
                DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
        }

        assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}

static int
evict_and_alloc_block(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        assert(bo_fake->block == NULL);

        /* Search for already free memory:
         */
        if (alloc_block(bo))
                return 1;

        /* If we're not thrashing, allow lru eviction to dig deeper into
         * recently used textures.  We'll probably be thrashing soon:
         */
        if (!bufmgr_fake->thrashing) {
                while (evict_lru(bufmgr_fake, 0))
                        if (alloc_block(bo))
                                return 1;
        }

        /* Keep thrashing counter alive?
         */
        if (bufmgr_fake->thrashing)
                bufmgr_fake->thrashing = 20;

        /* Wait on any already pending fences - here we are waiting for any
         * freed memory that has been submitted to hardware and fenced to
         * become available:
         */
        while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
                uint32_t fence = bufmgr_fake->fenced.next->fence;
                _fence_wait_internal(bufmgr_fake, fence);

                if (alloc_block(bo))
                        return 1;
        }

        if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
                while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
                        uint32_t fence = bufmgr_fake->fenced.next->fence;
                        _fence_wait_internal(bufmgr_fake, fence);
                }

                if (!bufmgr_fake->thrashing) {
                        DBG("thrashing\n");
                }
                bufmgr_fake->thrashing = 20;

                if (alloc_block(bo))
                        return 1;
        }

        while (evict_mru(bufmgr_fake))
                if (alloc_block(bo))
                        return 1;

        DBG("%s 0x%lx bytes failed\n", __func__, bo->size);

        return 0;
}

/***********************************************************************
 * Public functions
 */

/**
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
        unsigned int cookie;

        cookie = _fence_emit_internal(bufmgr_fake);
        _fence_wait_internal(bufmgr_fake, cookie);
}

/**
 * Wait for rendering to a buffer to complete.
 *
 * It is assumed that the batchbuffer which performed the rendering included
 * the necessary flushing.
 */
static void
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        if (bo_fake->block == NULL || !bo_fake->block->fenced)
                return;

        _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}

static void
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;

        pthread_mutex_lock(&bufmgr_fake->lock);
        drm_intel_fake_bo_wait_rendering_locked(bo);
        pthread_mutex_unlock(&bufmgr_fake->lock);
}

/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
        struct block *block, *tmp;

        pthread_mutex_lock(&bufmgr_fake->lock);

        bufmgr_fake->need_fence = 1;
        bufmgr_fake->fail = 0;

        /* Wait for hardware idle.  We don't know where acceleration has been
         * happening, so we'll need to wait anyway before letting anything get
         * put on the card again.
         */
        drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

        /* Check that we hadn't released the lock without having fenced the
         * last set of buffers.
         */
        assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
        assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

        DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
                assert(_fence_test(bufmgr_fake, block->fence));
                set_dirty(block->bo);
        }

        pthread_mutex_unlock(&bufmgr_fake->lock);
}

static drm_intel_bo *
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
                        const char *name,
                        unsigned long size,
                        unsigned int alignment)
{
        drm_intel_bufmgr_fake *bufmgr_fake;
        drm_intel_bo_fake *bo_fake;

        bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

        assert(size != 0);

        bo_fake = calloc(1, sizeof(*bo_fake));
        if (!bo_fake)
                return NULL;

        bo_fake->bo.size = size;
        bo_fake->bo.offset = -1;
        bo_fake->bo.virtual = NULL;
        bo_fake->bo.bufmgr = bufmgr;
        bo_fake->refcount = 1;

        /* Alignment must be a power of two */
        assert((alignment & (alignment - 1)) == 0);
        if (alignment == 0)
                alignment = 1;
        bo_fake->alignment = alignment;
        bo_fake->id = ++bufmgr_fake->buf_nr;
        bo_fake->name = name;
        bo_fake->flags = 0;
        bo_fake->is_static = 0;

        DBG("drm_bo_alloc: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
            bo_fake->bo.size / 1024);

        return &bo_fake->bo;
}

static drm_intel_bo *
drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
                              const char *name,
                              int x, int y, int cpp,
                              uint32_t *tiling_mode,
                              unsigned long *pitch,
                              unsigned long flags)
{
        unsigned long stride, aligned_y;

        /* No runtime tiling support for fake. */
        *tiling_mode = I915_TILING_NONE;

        /* Align it for being a render target.  Shouldn't need anything else. */
        stride = x * cpp;
        stride = ROUND_UP_TO(stride, 64);

        /* 965 subspan loading alignment */
        aligned_y = ALIGN(y, 2);

        *pitch = stride;

        return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
                                       4096);
}

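/* Example of the pitch math above, with hypothetical surface parameters:
 * a 100x31 RGBA surface (cpp = 4) gives stride = 400, rounded up to 448;
 * y is padded to 32 rows, so the BO covers 448 * 32 = 14336 bytes and is
 * allocated with 4096-byte alignment.
 */
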
drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
                               const char *name,
                               unsigned long offset,
                               unsigned long size, void *virtual)
{
        drm_intel_bufmgr_fake *bufmgr_fake;
        drm_intel_bo_fake *bo_fake;

        bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

        assert(size != 0);

        bo_fake = calloc(1, sizeof(*bo_fake));
        if (!bo_fake)
                return NULL;

        bo_fake->bo.size = size;
        bo_fake->bo.offset = offset;
        bo_fake->bo.virtual = virtual;
        bo_fake->bo.bufmgr = bufmgr;
        bo_fake->refcount = 1;
        bo_fake->id = ++bufmgr_fake->buf_nr;
        bo_fake->name = name;
        bo_fake->flags = BM_PINNED;
        bo_fake->is_static = 1;

        DBG("drm_bo_alloc_static: (buf %d: %s, %lu kb)\n", bo_fake->id,
            bo_fake->name, bo_fake->bo.size / 1024);

        return &bo_fake->bo;
}

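/* Sketch of a typical use (the names and values are made up): a driver
 * wraps a fixed hardware range, such as the ring buffer, in a static BO
 * so that relocations can target it while it is never evicted:
 *
 *      ring_bo = drm_intel_bo_fake_alloc_static(bufmgr, "ring",
 *                                               ring_offset, ring_size,
 *                                               ring_virtual);
 */
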
static void
drm_intel_fake_bo_reference(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        pthread_mutex_lock(&bufmgr_fake->lock);
        bo_fake->refcount++;
        pthread_mutex_unlock(&bufmgr_fake->lock);
}

static void
drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
{
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        bo_fake->refcount++;
}

static void
drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
        int i;

        if (--bo_fake->refcount == 0) {
                assert(bo_fake->map_count == 0);
                /* No remaining references, so free it */
                if (bo_fake->block)
                        free_block(bufmgr_fake, bo_fake->block, 1);
                free_backing_store(bo);

                for (i = 0; i < bo_fake->nr_relocs; i++)
                        drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
                                                             target_buf);

                DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
                    bo_fake->name);

                free(bo_fake->relocs);
                free(bo);
        }
}

static void
drm_intel_fake_bo_unreference(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;

        pthread_mutex_lock(&bufmgr_fake->lock);
        drm_intel_fake_bo_unreference_locked(bo);
        pthread_mutex_unlock(&bufmgr_fake->lock);
}

/**
 * Set the buffer as not requiring backing store, and instead get the callback
 * invoked whenever it would be set dirty.
 */
void
drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
                                        void (*invalidate_cb) (drm_intel_bo *bo,
                                                               void *ptr),
                                        void *ptr)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        pthread_mutex_lock(&bufmgr_fake->lock);

        if (bo_fake->backing_store)
                free_backing_store(bo);

        bo_fake->flags |= BM_NO_BACKING_STORE;

        DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
        bo_fake->dirty = 1;
        bo_fake->invalidate_cb = invalidate_cb;
        bo_fake->invalidate_ptr = ptr;

        /* Note that it is invalid right from the start.  Also note
         * invalidate_cb is called with the bufmgr locked, so cannot
         * itself make bufmgr calls.
         */
        if (invalidate_cb != NULL)
                invalidate_cb(bo, ptr);

        pthread_mutex_unlock(&bufmgr_fake->lock);
}

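/* Sketch of the intended use; my_invalidate and my_state are hypothetical:
 *
 *      drm_intel_bo_fake_disable_backing_store(bo, my_invalidate, my_state);
 *
 * A driver that can regenerate the buffer's contents on demand registers a
 * callback instead of paying for a system-memory copy.  The callback runs
 * with the bufmgr lock held, so it must not call back into the buffer
 * manager.
 */
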
/**
 * Map a buffer into bo->virtual, allocating either card memory space (if
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        /* Static buffers are always mapped. */
        if (bo_fake->is_static) {
                if (bo_fake->card_dirty) {
                        drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
                        bo_fake->card_dirty = 0;
                }
                return 0;
        }

        /* Allow recursive mapping.  Mesa may recursively map buffers with
         * nested display loops, and it is used internally in bufmgr_fake
         * for relocation.
         */
        if (bo_fake->map_count++ != 0)
                return 0;

        {
                DBG("drm_bo_map: (buf %d: %s, %lu kb)\n", bo_fake->id,
                    bo_fake->name, bo_fake->bo.size / 1024);

                if (bo->virtual != NULL) {
                        drmMsg("%s: already mapped\n", __func__);
                        abort();
                } else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {

                        if (!bo_fake->block && !evict_and_alloc_block(bo)) {
                                DBG("%s: alloc failed\n", __func__);
                                bufmgr_fake->fail = 1;
                                return 1;
                        } else {
                                assert(bo_fake->block);
                                bo_fake->dirty = 0;

                                if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
                                    bo_fake->block->fenced) {
                                        drm_intel_fake_bo_wait_rendering_locked
                                            (bo);
                                }

                                bo->virtual = bo_fake->block->virtual;
                        }
                } else {
                        if (write_enable)
                                set_dirty(bo);

                        if (bo_fake->backing_store == 0)
                                alloc_backing_store(bo);

                        if ((bo_fake->card_dirty == 1) && bo_fake->block) {
                                if (bo_fake->block->fenced)
                                        drm_intel_fake_bo_wait_rendering_locked
                                            (bo);

                                memcpy(bo_fake->backing_store,
                                       bo_fake->block->virtual,
                                       bo_fake->block->bo->size);
                                bo_fake->card_dirty = 0;
                        }

                        bo->virtual = bo_fake->backing_store;
                }
        }

        return 0;
}

static int
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        int ret;

        pthread_mutex_lock(&bufmgr_fake->lock);
        ret = drm_intel_fake_bo_map_locked(bo, write_enable);
        pthread_mutex_unlock(&bufmgr_fake->lock);

        return ret;
}

static int
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

        /* Static buffers are always mapped. */
        if (bo_fake->is_static)
                return 0;

        assert(bo_fake->map_count != 0);
        if (--bo_fake->map_count != 0)
                return 0;

        DBG("drm_bo_unmap: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
            bo_fake->bo.size / 1024);

        bo->virtual = NULL;

        return 0;
}

static int
drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
        drm_intel_bufmgr_fake *bufmgr_fake =
            (drm_intel_bufmgr_fake *) bo->bufmgr;
        int ret;

        pthread_mutex_lock(&bufmgr_fake->lock);
        ret = drm_intel_fake_bo_unmap_locked(bo);
        pthread_mutex_unlock(&bufmgr_fake->lock);

        return ret;
}

static int
drm_intel_fake_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                          unsigned long size, const void *data)
{
        int ret;

        if (size == 0 || data == NULL)
                return 0;

        ret = drm_intel_bo_map(bo, 1);
        if (ret)
                return ret;
        memcpy((unsigned char *)bo->virtual + offset, data, size);
        drm_intel_bo_unmap(bo);
        return 0;
}

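/* Usage sketch for the map path above (hedged example, not from the
 * source): CPU access goes through map/unmap, and a writable map dirties
 * the BO so the next validate re-uploads it to card memory:
 *
 *      if (drm_intel_bo_map(bo, 1) == 0) {
 *              memset(bo->virtual, 0, bo->size);
 *              drm_intel_bo_unmap(bo);
 *      }
 */
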
static void
drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
        struct block *block, *tmp;

        bufmgr_fake->performed_rendering = 0;
        /* Kick every BO that is currently on the hardware off of it,
         * freeing its block and marking it dirty so its contents get
         * re-uploaded on the next validate.
         */
        DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
                drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

                block->on_hardware = 0;
                free_block(bufmgr_fake, block, 0);
                bo_fake->block = NULL;
                bo_fake->validated = 0;
                if (!(bo_fake->flags & BM_NO_BACKING_STORE))
                        bo_fake->dirty = 1;
        }
}

1159static int
Eric Anholtd70d6052009-10-06 12:40:42 -07001160 drm_intel_fake_bo_validate(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001161{
Eric Anholtd70d6052009-10-06 12:40:42 -07001162 drm_intel_bufmgr_fake *bufmgr_fake;
1163 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001164
Eric Anholtd70d6052009-10-06 12:40:42 -07001165 bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001166
Thierry Reding3d7a51e2014-04-08 22:18:18 +02001167 DBG("drm_bo_validate: (buf %d: %s, %lu kb)\n", bo_fake->id,
Eric Anholtd70d6052009-10-06 12:40:42 -07001168 bo_fake->name, bo_fake->bo.size / 1024);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001169
Eric Anholtd70d6052009-10-06 12:40:42 -07001170 /* Sanity check: Buffers should be unmapped before being validated.
1171 * This is not so much of a problem for bufmgr_fake, but TTM refuses,
1172 * and the problem is harder to debug there.
1173 */
1174 assert(bo_fake->map_count == 0);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001175
Eric Anholtd70d6052009-10-06 12:40:42 -07001176 if (bo_fake->is_static) {
1177 /* Add it to the needs-fence list */
1178 bufmgr_fake->need_fence = 1;
1179 return 0;
1180 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001181
Eric Anholtd70d6052009-10-06 12:40:42 -07001182 /* Allocate the card memory */
1183 if (!bo_fake->block && !evict_and_alloc_block(bo)) {
1184 bufmgr_fake->fail = 1;
1185 DBG("Failed to validate buf %d:%s\n", bo_fake->id,
1186 bo_fake->name);
1187 return -1;
1188 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001189
Eric Anholtd70d6052009-10-06 12:40:42 -07001190 assert(bo_fake->block);
1191 assert(bo_fake->block->bo == &bo_fake->bo);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001192
Eric Anholtd70d6052009-10-06 12:40:42 -07001193 bo->offset = bo_fake->block->mem->ofs;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001194
Eric Anholtd70d6052009-10-06 12:40:42 -07001195 /* Upload the buffer contents if necessary */
1196 if (bo_fake->dirty) {
Thierry Reding3d7a51e2014-04-08 22:18:18 +02001197 DBG("Upload dirty buf %d:%s, sz %lu offset 0x%x\n", bo_fake->id,
Eric Anholtd70d6052009-10-06 12:40:42 -07001198 bo_fake->name, bo->size, bo_fake->block->mem->ofs);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001199
		assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));

		/* In principle we could just wait for a fence on this
		 * memory, which we would be tracking when we free it.
		 * Waiting for idle is a sufficiently large hammer for now.
		 */
		drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

		/* We may never have mapped this BO, so it might not have
		 * any backing store.  That should be rare, but zero the
		 * card memory in any case.
		 */
		if (bo_fake->backing_store)
			memcpy(bo_fake->block->virtual, bo_fake->backing_store,
			       bo->size);
		else
			memset(bo_fake->block->virtual, 0, bo->size);

		bo_fake->dirty = 0;
	}

	bo_fake->block->fenced = 0;
	bo_fake->block->on_hardware = 1;
	DRMLISTDEL(bo_fake->block);
	DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);

	bo_fake->validated = 1;
	bufmgr_fake->need_fence = 1;

	return 0;
}

static void
drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
	unsigned int cookie;

	cookie = _fence_emit_internal(bufmgr_fake);
	fence_blocks(bufmgr_fake, cookie);

	DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
}

static void
drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	pthread_mutex_destroy(&bufmgr_fake->lock);
	mmDestroy(bufmgr_fake->heap);
	free(bufmgr);
}

static int
drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			  drm_intel_bo *target_bo, uint32_t target_offset,
			  uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	struct fake_buffer_reloc *r;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
	int i;

	pthread_mutex_lock(&bufmgr_fake->lock);

	assert(bo);
	assert(target_bo);

	/* Lazily allocate the relocation array, and fail cleanly rather
	 * than dereference NULL if the allocation doesn't succeed.
	 */
	if (bo_fake->relocs == NULL) {
		bo_fake->relocs =
		    malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
		if (bo_fake->relocs == NULL) {
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return -ENOMEM;
		}
	}

	r = &bo_fake->relocs[bo_fake->nr_relocs++];

	assert(bo_fake->nr_relocs <= MAX_RELOCS);

	drm_intel_fake_bo_reference_locked(target_bo);

	if (!target_fake->is_static) {
		bo_fake->child_size +=
		    ALIGN(target_bo->size, target_fake->alignment);
		bo_fake->child_size += target_fake->child_size;
	}
	r->target_buf = target_bo;
	r->offset = offset;
	r->last_target_offset = target_bo->offset;
	r->delta = target_offset;
	r->read_domains = read_domains;
	r->write_domain = write_domain;

	if (bufmgr_fake->debug) {
		/* Check that a conflicting relocation hasn't already been
		 * emitted.
		 */
		for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
			struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];

			assert(r->offset != r2->offset);
		}
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);

	return 0;
}
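
/* Editor's sketch, not part of the original file: how a driver records
 * a relocation through the public API that lands in
 * drm_intel_fake_emit_reloc() above.  "batch", "target", and "dword_idx"
 * are hypothetical; the function is not referenced by the library.
 */
static int
example_emit_sampler_reloc(drm_intel_bo *batch, unsigned int dword_idx,
			   drm_intel_bo *target)
{
	/* The byte offset (4 * dword_idx) names the 32-bit slot in the
	 * batch that will be patched with target->offset + 0 at validate
	 * time, once the target has a real card address.  Read domain
	 * SAMPLER, no write domain.
	 */
	return drm_intel_bo_emit_reloc(batch, 4 * dword_idx,
				       target, 0,
				       I915_GEM_DOMAIN_SAMPLER, 0);
}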

/**
 * Incorporates the validation flags associated with each relocation into
 * the combined validation flags for the buffer on this batchbuffer
 * submission.
 */
static void
drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i;

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake =
		    (drm_intel_bo_fake *) r->target_buf;

		/* Do the same for the tree of buffers we depend on */
		drm_intel_fake_calculate_domains(r->target_buf);

		target_fake->read_domains |= r->read_domains;
		target_fake->write_domain |= r->write_domain;
	}
}

static int
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i, ret;

	assert(bo_fake->map_count == 0);

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake =
		    (drm_intel_bo_fake *) r->target_buf;
		uint32_t reloc_data;

		/* Validate the target buffer if that hasn't been done. */
		if (!target_fake->validated) {
			ret =
			    drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
			if (ret != 0) {
				if (bo->virtual != NULL)
					drm_intel_fake_bo_unmap_locked(bo);
				return ret;
			}
		}

		/* Calculate the value of the relocation entry. */
		if (r->target_buf->offset != r->last_target_offset) {
			reloc_data = r->target_buf->offset + r->delta;

			if (bo->virtual == NULL)
				drm_intel_fake_bo_map_locked(bo, 1);

			*(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
			    reloc_data;

			r->last_target_offset = r->target_buf->offset;
		}
	}

	if (bo->virtual != NULL)
		drm_intel_fake_bo_unmap_locked(bo);

	if (bo_fake->write_domain != 0) {
		if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
			if (bo_fake->backing_store == 0)
				alloc_backing_store(bo);
		}
		bo_fake->card_dirty = 1;
		bufmgr_fake->performed_rendering = 1;
	}

	return drm_intel_fake_bo_validate(bo);
}

static void
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i;

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake =
		    (drm_intel_bo_fake *) r->target_buf;

		if (target_fake->validated)
			drm_intel_bo_fake_post_submit(r->target_buf);

		DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
		    bo_fake->name, (uint32_t) bo->offset, r->offset,
		    target_fake->name, (uint32_t) r->target_buf->offset,
		    r->delta);
	}

	assert(bo_fake->map_count == 0);
	bo_fake->validated = 0;
	bo_fake->read_domains = 0;
	bo_fake->write_domain = 0;
}

void
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
					int (*exec) (drm_intel_bo *bo,
						     unsigned int used,
						     void *priv),
					void *priv)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	bufmgr_fake->exec = exec;
	bufmgr_fake->exec_priv = priv;
}
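
/* Editor's sketch, not part of the original file: a minimal callback of
 * the shape set_exec_callback() expects.  When one is installed,
 * drm_intel_fake_bo_exec() below hands the validated batch to it instead
 * of issuing DRM_I915_BATCHBUFFER itself, and a non-zero return is
 * propagated to the caller.  The "example_" name is hypothetical and the
 * body just pretends the batch was submitted; a caller would install it
 * with drm_intel_bufmgr_fake_set_exec_callback(bufmgr,
 * example_exec_callback, NULL).
 */
static int
example_exec_callback(drm_intel_bo *bo, unsigned int used, void *priv)
{
	(void) priv;

	/* A real driver would dispatch bo->offset / used bytes through
	 * its own submission path here.
	 */
	drmMsg("example exec: batch at 0x%08lx, %u bytes used\n",
	       bo->offset, used);
	return 0;
}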

static int
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
	struct drm_i915_batchbuffer batch;
	int ret;
	int retry_count = 0;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->performed_rendering = 0;

	drm_intel_fake_calculate_domains(bo);

	batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;

	/* If we've run out of card memory, blow the whole lot away and
	 * retry once.
	 */
restart:
	ret = drm_intel_fake_reloc_and_validate_buffer(bo);
	if (bufmgr_fake->fail == 1) {
		if (retry_count == 0) {
			retry_count++;
			drm_intel_fake_kick_all_locked(bufmgr_fake);
			bufmgr_fake->fail = 0;
			goto restart;
		} else /* dump out the memory here */
			mmDumpMemInfo(bufmgr_fake->heap);
	}

	assert(ret == 0);

	if (bufmgr_fake->exec != NULL) {
		ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
		if (ret != 0) {
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return ret;
		}
	} else {
		batch.start = bo->offset;
		batch.used = used;
		batch.cliprects = cliprects;
		batch.num_cliprects = num_cliprects;
		batch.DR1 = 0;
		batch.DR4 = DR4;

		if (drmCommandWrite
		    (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
		     sizeof(batch))) {
			drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return -errno;
		}
	}

	drm_intel_fake_fence_validated(bo->bufmgr);

	drm_intel_bo_fake_post_submit(bo);

	pthread_mutex_unlock(&bufmgr_fake->lock);

	return 0;
}
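
/* Editor's sketch, not part of the original file: a typical submission
 * path against this bufmgr through the public API.  "batch" is a
 * hypothetical BO, "commands" holds "used" bytes of hardware commands
 * (terminated appropriately for the chipset), and relocations are
 * assumed to have been recorded with drm_intel_bo_emit_reloc() already.
 * Not referenced by the library.
 */
static int
example_submit_batch(drm_intel_bo *batch, const uint32_t *commands, int used)
{
	int ret;

	/* Copy the commands into the BO's backing store; they are
	 * uploaded to card memory during validation inside bo_exec.
	 */
	ret = drm_intel_bo_subdata(batch, 0, used, commands);
	if (ret != 0)
		return ret;

	/* No cliprects, and a DR4 of 0. */
	return drm_intel_bo_exec(batch, used, NULL, 0, 0);
}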

/**
 * Return an error if the list of BOs will exceed the aperture size.
 *
 * This is a rough guess: during the validate sequence we may place a
 * buffer in an inopportune spot early on and then fail to fit a set that
 * is actually smaller than the aperture, so a successful check here does
 * not guarantee that validation will succeed.
 */
static int
drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
	unsigned int sz = 0;
	int i;

	for (i = 0; i < count; i++) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];

		if (bo_fake == NULL)
			continue;

		if (!bo_fake->is_static)
			sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
		sz += bo_fake->child_size;
	}

	if (sz > bufmgr_fake->size) {
		DBG("check_space: overflowed bufmgr size, %ukb vs %lukb\n",
		    sz / 1024, bufmgr_fake->size / 1024);
		return -1;
	}

	DBG("drm_check_space: sz %ukb vs bufmgr %lukb\n", sz / 1024,
	    bufmgr_fake->size / 1024);
	return 0;
}
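
/* Editor's sketch, not part of the original file: how a driver would use
 * the aperture check before committing to a batch.  Returns non-zero
 * when the rough estimate fits; a caller is expected to flush and split
 * its workload when it does not.  "bos"/"count" are hypothetical and the
 * function is not referenced by the library.
 */
static int
example_fits_in_aperture(drm_intel_bo **bos, int count)
{
	/* The public entry point dispatches to
	 * drm_intel_fake_check_aperture_space() above for this bufmgr.
	 */
	return drm_intel_bufmgr_check_aperture_space(bos, count) == 0;
}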

/**
 * Evicts all buffers, waiting for fences to pass and copying contents out
 * as necessary.
 *
 * Used by the X Server on LeaveVT, when the card memory is no longer our
 * own.
 */
void
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
	struct block *block, *tmp;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->need_fence = 1;
	bufmgr_fake->fail = 0;

	/* Wait for hardware idle.  We don't know where acceleration has been
	 * happening, so we'll need to wait anyway before letting anything get
	 * put on the card again.
	 */
	drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

	/* Check that we didn't release the lock without having fenced the
	 * last set of buffers.
	 */
	assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
		/* Releases the memory, and memcpys dirty contents out if
		 * necessary.
		 */
		free_block(bufmgr_fake, block, 0);
		bo_fake->block = NULL;
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
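
/* Editor's sketch, not part of the original file: the LeaveVT hook a
 * server would call.  Dirty contents are preserved in backing store and
 * re-uploaded the next time the buffers are validated, so nothing extra
 * is needed on EnterVT.  "example_" names are hypothetical.
 */
static void
example_leave_vt(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake_evict_all(bufmgr);
}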

void
drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
					volatile unsigned int
					*last_dispatch)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}

drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
			   void *low_virtual, unsigned long size,
			   volatile unsigned int *last_dispatch)
{
	drm_intel_bufmgr_fake *bufmgr_fake;

	bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
	if (bufmgr_fake == NULL)
		return NULL;

	if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
		free(bufmgr_fake);
		return NULL;
	}

	/* Initialize allocator */
	DRMINITLISTHEAD(&bufmgr_fake->fenced);
	DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
	DRMINITLISTHEAD(&bufmgr_fake->lru);

	bufmgr_fake->low_offset = low_offset;
	bufmgr_fake->virtual = low_virtual;
	bufmgr_fake->size = size;
	bufmgr_fake->heap = mmInit(low_offset, size);

	/* Hook in methods */
	bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
	bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
	bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
	bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
	bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
	bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
	bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
	bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
	bufmgr_fake->bufmgr.bo_wait_rendering =
	    drm_intel_fake_bo_wait_rendering;
	bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
	bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
	bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
	bufmgr_fake->bufmgr.check_aperture_space =
	    drm_intel_fake_check_aperture_space;
	bufmgr_fake->bufmgr.debug = 0;

	bufmgr_fake->fd = fd;
	bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;

	return &bufmgr_fake->bufmgr;
}
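
/* Editor's sketch, not part of the original file: bringing up the fake
 * bufmgr.  The caller supplies the aperture range it owns (card offset,
 * CPU mapping, size) and a pointer to where the hardware writes the
 * last-retired fence cookie (typically a slot in the hardware status
 * page).  All "example_"/"gtt_" names are hypothetical; the function is
 * not referenced by the library.
 */
static drm_intel_bufmgr *
example_init_fake_bufmgr(int fd, unsigned long gtt_offset, void *gtt_map,
			 unsigned long gtt_size,
			 volatile unsigned int *hw_status_slot)
{
	drm_intel_bufmgr *bufmgr;

	bufmgr = drm_intel_bufmgr_fake_init(fd, gtt_offset, gtt_map,
					    gtt_size, hw_status_slot);
	if (bufmgr == NULL)
		return NULL;

	/* Enable DBG output while bringing things up; see also
	 * drm_intel_bufmgr_fake_set_last_dispatch() above if the status
	 * page location changes after init.
	 */
	drm_intel_bufmgr_set_debug(bufmgr, 1);

	return bufmgr;
}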