/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "errno.h"
#include "libdrm_lists.h"
#include "intel_atomic.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

/* Only cache objects up to 64MB.  Bigger than that, and the rounding of the
 * size makes many operations fail that wouldn't otherwise.
 */
#define DRM_INTEL_GEM_BO_BUCKETS	14
typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	char bo_reuse;
} drm_intel_bufmgr_gem;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/** Array of bos corresponding to relocs[i].target_handle */
	drm_intel_bo **reloc_target_bo;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	char included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	char used_as_reloc_target;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	char reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						       time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (IS_I965G(bufmgr_gem))
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (IS_I9XX(bufmgr_gem)) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t tiling_mode)
{
	unsigned long tile_width = 512;
	unsigned long i;

	if (tiling_mode == I915_TILING_NONE)
		return ROUND_UP_TO(pitch, tile_width);

	/* 965 is flexible */
	if (IS_I965G(bufmgr_gem))
		return ROUND_UP_TO(pitch, tile_width);

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (!IS_I965G(bufmgr_gem) && bo_gem->tiling_mode != I915_TILING_NONE)
		size *= 2;

	bo_gem->reloc_tree_size = size;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));

	return 0;
}

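/**
 * Returns whether the kernel still considers this BO busy (pending GPU
 * reads or writes), by querying DRM_IOCTL_I915_GEM_BUSY and retrying on
 * EINTR.
 */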
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	} while (ret == -1 && errno == EINTR);

	return (ret == 0 && busy.busy);
}

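/**
 * Tells the kernel whether the BO's backing pages are needed
 * (I915_MADV_WILLNEED) or may be discarded under memory pressure
 * (I915_MADV_DONTNEED).  Returns whether the pages are still retained.
 */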
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

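/**
 * Allocates a new BO, preferring to reuse one from the matching cache
 * bucket: render targets are taken from the MRU end of the bucket, other
 * buffers only when the LRU entry is idle.  Falls back to
 * DRM_IOCTL_I915_GEM_CREATE when nothing suitable is cached.
 */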
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	int alloc_from_cache;
	unsigned long bo_size;
	int for_render = 0;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = 1;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = 0;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = 1;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = 1;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_CREATE,
				    &create);
		} while (ret == -1 && errno == EINTR);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = 0;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->reusable = 1;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
}

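/**
 * Allocates a BO laid out for a tiled surface: the height is aligned for
 * the requested tiling mode, the pitch is rounded up to a legal tile
 * pitch, and the total size is rounded as the chipset requires before
 * the tiling mode is applied to the new BO.
 */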
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	drm_intel_bo *bo;
	unsigned long size, stride, aligned_y = y;
	int ret;

	if (*tiling_mode == I915_TILING_NONE)
		aligned_y = ALIGN(y, 2);
	else if (*tiling_mode == I915_TILING_X)
		aligned_y = ALIGN(y, 8);
	else if (*tiling_mode == I915_TILING_Y)
		aligned_y = ALIGN(y, 32);

	stride = x * cpp;
	stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
	size = stride * aligned_y;
	size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);

	bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
	if (!bo)
		return NULL;

	ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(bo);
		return NULL;
	}

	*pitch = stride;

	return bo;
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_GEM_OPEN,
			    &open_arg);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
			name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = 0;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences = 0;
	else
		bo_gem->reloc_tree_fences = 1;
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

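/**
 * Releases any CPU/GTT mappings and the relocation arrays, then closes
 * the GEM handle so the kernel can free the object's storage.
 */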
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	if (bo_gem->mem_virtual)
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
	if (bo_gem->gtt_virtual)
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);

	free(bo_gem->reloc_target_bo);
	free(bo_gem->relocs);

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		fprintf(stderr,
			"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
			bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}
}

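/**
 * Called when the last reference to a BO is dropped: unreferences all of
 * its relocation targets, then either returns the buffer to its cache
 * bucket (marked I915_MADV_DONTNEED) for reuse or frees it outright.
 */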
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	uint32_t tiling_mode;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->
							  reloc_target_bo[i],
							  time);
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	tiling_mode = I915_TILING_NONE;
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);

		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						       time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Allow recursive mapping. Mesa may recursively map buffers with
	 * nested display loops.
	 */
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_MMAP,
				    &mmap_arg);
		} while (ret == -1 && errno == EINTR);
		if (ret != 0) {
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__, bo_gem->gem_handle,
				bo_gem->name, strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

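/**
 * Maps the buffer through the GTT aperture using the fake mmap offset
 * returned by DRM_IOCTL_I915_GEM_MMAP_GTT, then moves the object to the
 * GTT domain so that CPU caches are flushed.
 */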
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
		    bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_MMAP_GTT,
				    &mmap_arg);
		} while (ret == -1 && errno == EINTR);
		if (ret != 0) {
			fprintf(stderr,
				"%s:%d: Error preparing buffer map %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name,
				strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name,
				strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return errno;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);

	if (ret != 0) {
		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	assert(bo_gem->gtt_virtual != NULL);

	pthread_mutex_lock(&bufmgr_gem->lock);
	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;
	int ret;

	if (bo == NULL)
		return 0;

	assert(bo_gem->mem_virtual != NULL);

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Cause a flush to happen if the buffer's pinned for scanout, so the
	 * results show up in a timely manner.
	 */
	sw_finish.handle = bo_gem->gem_handle;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SW_FINISH,
			    &sw_finish);
	} while (ret == -1 && errno == EINTR);

	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return 0;
}

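/** Uploads data into the BO at the given offset using DRM_IOCTL_I915_GEM_PWRITE. */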
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PWRITE,
			    &pwrite);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr,
			"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return 0;
}

static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		    &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

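/** Reads data back from the BO at the given offset using DRM_IOCTL_I915_GEM_PREAD. */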
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PREAD,
			    &pread);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr,
			"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return 0;
}

/** Waits for all GPU rendering to the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 0);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr,
			"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			set_domain.read_domains, set_domain.write_domain,
			strerror(errno));
	}
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL)
		drm_intel_setup_reloc_list(bo);

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	/* Flag the target to disallow further relocations in it. */
	target_bo_gem->used_as_reloc_target = 1;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
	drm_intel_gem_bo_reference(target_bo);

	bo_gem->reloc_count++;

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

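/**
 * Copies the offsets the kernel reported after execbuffer back into each
 * BO, so that later relocations can use them as presumed offsets.
 */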
Eric Anholt6a9eb082008-06-03 09:27:37 -07001240static void
Eric Anholtd70d6052009-10-06 12:40:42 -07001241drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001242{
Eric Anholtd70d6052009-10-06 12:40:42 -07001243 int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_EXECBUFFER,
			    &execbuf);
	} while (ret != 0 && errno == EAGAIN);

	if (ret != 0 && errno == ENOMEM) {
		fprintf(stderr,
			"Execbuffer fails to pin. "
			"Estimate: %u. Actual: %u. Available: %u\n",
			drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							   bufmgr_gem->exec_count),
			drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							  bufmgr_gem->exec_count),
			(unsigned int)bufmgr_gem->gtt_size);
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
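
/*
 * Illustrative sketch (not compiled into the library): how a caller is
 * expected to drive the exec path above through the public intel_bufmgr
 * API.  The batch size, the relocation offset, and the "render_target"
 * buffer are made-up placeholders; only the call sequence (emit_reloc,
 * upload the batch, then bo_exec on it) is the point being shown.
 */
#if 0
static void
example_submit_batch(drm_intel_bufmgr *bufmgr, drm_intel_bo *render_target)
{
	uint32_t batch[4];
	int used = sizeof(batch);
	drm_intel_bo *batch_bo;

	batch_bo = drm_intel_bo_alloc(bufmgr, "example batch", 4096, 4096);

	/* Real command words elided; a real batch ends with
	 * MI_BATCH_BUFFER_END.
	 */
	memset(batch, 0, sizeof(batch));

	/* Record a relocation so the kernel patches the target's final
	 * GTT offset into the batch at byte offset 8.
	 */
	drm_intel_bo_emit_reloc(batch_bo, 8,
				render_target, 0,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);

	drm_intel_bo_subdata(batch_bo, 0, used, batch);

	/* This ends up in drm_intel_gem_bo_exec(): the validate list is
	 * built from the relocation tree and EXECBUFFER is issued.
	 */
	drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);

	drm_intel_bo_unreference(batch_bo);
}
#endif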

static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PIN,
			    &pin);
	} while (ret == -1 && errno == EINTR);

	if (ret != 0)
		return -errno;

	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = bo_gem->gem_handle;

	/* Retry on EINTR, matching the pin path above. */
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0)
		return -errno;

	return 0;
}
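
/*
 * Illustrative sketch (not compiled in): pinning is only useful for the
 * few buffers the kernel must never move while they are being scanned out
 * or used as a cursor, and it normally requires DRM master privileges.
 * "scanout_bo" and the CRTC-programming step are placeholders.
 */
#if 0
static int
example_pin_scanout(drm_intel_bo *scanout_bo)
{
	int ret;

	/* Reaches drm_intel_gem_bo_pin(); on success bo->offset holds the
	 * fixed GTT address the display engine can be pointed at.
	 */
	ret = drm_intel_bo_pin(scanout_bo, 4096);
	if (ret != 0)
		return ret;

	/* ... program the CRTC with scanout_bo->offset ... */

	/* Later, when the buffer is no longer being scanned out: */
	return drm_intel_bo_unpin(scanout_bo);
}
#endif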

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
		return 0;

	/* If we're going from non-tiling to tiling, bump fence count */
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences++;

	memset(&set_tiling, 0, sizeof(set_tiling));
	set_tiling.handle = bo_gem->gem_handle;
	set_tiling.tiling_mode = *tiling_mode;
	set_tiling.stride = stride;

	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		*tiling_mode = bo_gem->tiling_mode;
		return -errno;
	}
	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;

	/* If we're going from tiling to non-tiling, drop fence count */
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences--;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return 0;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}
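
/*
 * Illustrative sketch (not compiled in): requesting X tiling for a 2D
 * surface through the public API.  The pitch value is whatever the caller
 * computed for its surface; note that set_tiling may hand back a different
 * mode than requested (e.g. I915_TILING_NONE if the kernel refuses), which
 * is why *tiling_mode is examined afterwards.
 */
#if 0
static int
example_make_x_tiled(drm_intel_bo *surface_bo, uint32_t pitch_bytes)
{
	uint32_t tiling = I915_TILING_X;
	uint32_t swizzle;
	int ret;

	ret = drm_intel_bo_set_tiling(surface_bo, &tiling, pitch_bytes);
	if (ret != 0)
		return ret;

	/* The kernel may have downgraded the request. */
	if (tiling != I915_TILING_X)
		return -EINVAL;

	/* Swizzling matters if the CPU will address the tiled data. */
	drm_intel_bo_get_tiling(surface_bo, &tiling, &swizzle);
	return 0;
}
#endif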

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_flink flink;
	int ret;

	if (!bo_gem->global_name) {
		memset(&flink, 0, sizeof(flink));
		flink.handle = bo_gem->gem_handle;

		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;
		bo_gem->global_name = flink.name;
		bo_gem->reusable = 0;
	}

	*name = bo_gem->global_name;
	return 0;
}
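
/*
 * Illustrative sketch (not compiled in): sharing a buffer between two
 * processes via a flink name.  The exporter publishes the 32-bit name (how
 * it is communicated is up to the caller); the importer re-opens it with
 * drm_intel_bo_gem_create_from_name().  The "shared surface" label is a
 * placeholder.
 */
#if 0
/* Exporter */
static uint32_t
example_export(drm_intel_bo *bo)
{
	uint32_t name = 0;

	/* Also marks the bo non-reusable, since another process can now
	 * hold a reference to the underlying object by name.
	 */
	if (drm_intel_bo_flink(bo, &name) != 0)
		return 0;
	return name;
}

/* Importer (typically a different process with its own bufmgr) */
static drm_intel_bo *
example_import(drm_intel_bufmgr *bufmgr, uint32_t name)
{
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared surface",
						 name);
}
#endif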

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = 1;
}

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = 1;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total += drm_intel_gem_bo_get_aperture_space(
			bo_gem->reloc_target_bo[i]);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence register.
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = 0;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(
			bo_gem->reloc_target_bo[i]);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers.  This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence register constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -1;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -1;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
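
/*
 * Illustrative sketch (not compiled in): the intended call pattern for the
 * aperture check above.  A batchbuffer-building caller asks, before adding
 * more state, whether the buffers it is about to reference still fit; if
 * not, it flushes what it has and starts a fresh batch.  The helper name
 * example_flush_batch() is a placeholder for the caller's own code, not
 * libdrm API.
 */
#if 0
static void example_flush_batch(void);	/* assumed caller helper */

static void
example_emit_draw(drm_intel_bo *batch_bo, drm_intel_bo *vertex_bo,
		  drm_intel_bo *texture_bo)
{
	drm_intel_bo *check[] = { batch_bo, vertex_bo, texture_bo };

	if (drm_intel_bufmgr_check_aperture_space(check, 3) != 0) {
		/* Would overflow the aperture (or fence regs): submit the
		 * current batch first, then retry in a new one.
		 */
		example_flush_batch();
	}

	/* ... safe to emit relocations to vertex_bo and texture_bo ... */
}
#endif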

/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = 0;
	return 0;
}
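
/*
 * Illustrative sketch (not compiled in): a display server would typically
 * disable reuse on a buffer right after handing it to the kernel as a
 * scanout surface, so that freeing it never puts it back into the
 * allocation cache.  The framebuffer-registration step is elided; only the
 * bufmgr call is the point here.
 */
#if 0
static void
example_mark_scanout(drm_intel_bo *scanout_bo)
{
	/* ... register scanout_bo with the kernel as a framebuffer ... */

	drm_intel_bo_disable_reuse(scanout_bo);
}
#endif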

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_bo[i] == target_bo)
			return 1;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}
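
/*
 * Illustrative sketch (not compiled in): a typical use of bo_references is
 * deciding whether reading a buffer back requires flushing the batch
 * currently being built, because that batch already points at it.  The
 * example_flush_batch() name is again a placeholder for the caller's own
 * flush routine.
 */
#if 0
static void example_flush_batch(void);	/* assumed caller helper */

static void
example_read_back(drm_intel_bo *batch_bo, drm_intel_bo *result_bo,
		  void *out, unsigned long bytes)
{
	if (drm_intel_bo_references(batch_bo, result_bo))
		example_flush_batch();

	/* The pending rendering into result_bo has now been submitted, and
	 * the read below will wait for it to complete.
	 */
	drm_intel_bo_get_subdata(result_bo, 0, bytes, out);
}
#endif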

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, i;
	unsigned long size;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &bufmgr_gem->pci_device;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}

	if (!IS_I965G(bufmgr_gem)) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	/* Initialize the linked lists for BO reuse cache. */
	for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
		DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
		bufmgr_gem->cache_bucket[i].size = size;
	}

	return &bufmgr_gem->bufmgr;
}
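
/*
 * Illustrative sketch (not compiled in): the usual bring-up sequence for
 * this buffer manager.  The device path and the 16 KB batch size are just
 * example values; enabling reuse is optional but is what most callers do
 * immediately after init so freed buffers land in the size buckets set up
 * above.
 */
#if 0
static drm_intel_bufmgr *
example_bufmgr_create(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	drm_intel_bufmgr *bufmgr;

	if (fd < 0)
		return NULL;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
	if (bufmgr == NULL)
		return NULL;

	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	return bufmgr;
}
#endif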