/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <errno.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "libdrm_lists.h"
#include "intel_atomic.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

/* Only cache objects up to 64MB.  Bigger than that, and the rounding of the
 * size makes many operations fail that wouldn't otherwise.
 */
#define DRM_INTEL_GEM_BO_BUCKETS	14
typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	char bo_reuse;
} drm_intel_bufmgr_gem;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/** Array of bos corresponding to relocs[i].target_handle */
	drm_intel_bo **reloc_target_bo;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	char included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	char used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	char has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	char reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (IS_I965G(bufmgr_gem))
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (IS_I9XX(bufmgr_gem)) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t tiling_mode)
{
	unsigned long tile_width = 512;
	unsigned long i;

	if (tiling_mode == I915_TILING_NONE)
		return ROUND_UP_TO(pitch, tile_width);

	/* 965 is flexible */
	if (IS_I965G(bufmgr_gem))
		return ROUND_UP_TO(pitch, tile_width);

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture.  Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (!IS_I965G(bufmgr_gem) && bo_gem->tiling_mode != I915_TILING_NONE)
		size *= 2;

	bo_gem->reloc_tree_size = size;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_bo == NULL) {
		bo_gem->has_error = 1;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_bo);
		bo_gem->reloc_target_bo = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	} while (ret == -1 && errno == EINTR);

	return (ret == 0 && busy.busy);
}

static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

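/*
 * Example (illustrative sketch, not code from this library): marking a
 * buffer purgeable and reclaiming it before reuse.  It assumes the public
 * drm_intel_bo_madvise() wrapper dispatches to the function above, and
 * regenerate_contents() is a hypothetical caller-side helper.
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	// ... under memory pressure the kernel may now drop the pages ...
 *	if (drm_intel_bo_madvise(bo, I915_MADV_WILLNEED) == 0)
 *		regenerate_contents(bo);	// pages were purged
 */
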
/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	int alloc_from_cache;
	unsigned long bo_size;
	int for_render = 0;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = 1;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = 0;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = 1;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case.  Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = 1;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_CREATE,
				    &create);
		} while (ret == -1 && errno == EINTR);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = 0;
	bo_gem->has_error = 0;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->reusable = 1;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	drm_intel_bo *bo;
	unsigned long size, stride, aligned_y = y;
	int ret;

	if (*tiling_mode == I915_TILING_NONE)
		aligned_y = ALIGN(y, 2);
	else if (*tiling_mode == I915_TILING_X)
		aligned_y = ALIGN(y, 8);
	else if (*tiling_mode == I915_TILING_Y)
		aligned_y = ALIGN(y, 32);

	stride = x * cpp;
	stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
	size = stride * aligned_y;
	size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);

	bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
	if (!bo)
		return NULL;

	ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(bo);
		return NULL;
	}

	*pitch = stride;

	return bo;
}

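/*
 * Example (illustrative sketch, error handling elided): allocating an
 * X-tiled render target through the public wrapper.  "bufmgr" is assumed
 * to come from drm_intel_bufmgr_gem_init().  Tiling may be demoted to
 * I915_TILING_NONE (e.g. for oversized buffers), so callers must re-check
 * the tiling mode and use the returned pitch.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo;
 *
 *	bo = drm_intel_bo_alloc_tiled(bufmgr, "render target",
 *				      1024, 768, 4, &tiling, &pitch, 0);
 *	if (bo != NULL && tiling == I915_TILING_X)
 *		... // emit surface state using "pitch"
 */
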
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_GEM_OPEN,
			    &open_arg);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
			name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = 0;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences = 0;
	else
		bo_gem->reloc_tree_fences = 1;
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

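/*
 * Example (illustrative sketch; the IPC mechanism and error handling are
 * assumptions): sharing a buffer between two processes.  The exporter
 * obtains a global name with the public drm_intel_bo_flink(), and the
 * importer opens it with the function above.
 *
 *	// exporting process
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_to_peer(name);		// hypothetical IPC
 *
 *	// importing process
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */
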
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	if (bo_gem->mem_virtual)
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
	if (bo_gem->gtt_virtual)
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		fprintf(stderr,
			"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
			bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	uint32_t tiling_mode;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->
							  reloc_target_bo[i],
							  time);
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_bo) {
		free(bo_gem->reloc_target_bo);
		bo_gem->reloc_target_bo = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	tiling_mode = I915_TILING_NONE;
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);

		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Allow recursive mapping.  Mesa may recursively map buffers with
	 * nested display loops.
	 */
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_MMAP,
				    &mmap_arg);
		} while (ret == -1 && errno == EINTR);
		if (ret != 0) {
			ret = -errno;
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__, bo_gem->gem_handle,
				bo_gem->name, strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		ret = -errno;
		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

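/*
 * Example (illustrative sketch, return values ignored): filling a buffer
 * through a CPU mapping.  Because mem_virtual is cached above, a second
 * map of the same buffer only pays for the set-domain ioctl, not a new
 * mmap.
 *
 *	drm_intel_bo_map(bo, 1);		// 1 = write enable
 *	memset(bo->virtual, 0, bo->size);
 *	drm_intel_bo_unmap(bo);
 */
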
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
		    bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_MMAP_GTT,
				    &mmap_arg);
		} while (ret == -1 && errno == EINTR);
		if (ret != 0) {
			ret = -errno;
			fprintf(stderr,
				"%s:%d: Error preparing buffer map %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name,
				strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			ret = -errno;
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name,
				strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);

	if (ret != 0) {
		ret = -errno;
		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

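/*
 * Example (illustrative sketch; "pixels" is an assumption): writing to a
 * tiled buffer through the GTT mapping, where the hardware fence applies
 * the tiling swizzle so the CPU can use linear offsets.
 *
 *	drm_intel_gem_bo_map_gtt(bo);
 *	memcpy(bo->virtual, pixels, bo->size);
 *	drm_intel_gem_bo_unmap_gtt(bo);
 */
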
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	assert(bo_gem->gtt_virtual != NULL);

	pthread_mutex_lock(&bufmgr_gem->lock);
	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;
	int ret;

	if (bo == NULL)
		return 0;

	assert(bo_gem->mem_virtual != NULL);

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Cause a flush to happen if the buffer's pinned for scanout, so the
	 * results show up in a timely manner.
	 */
	sw_finish.handle = bo_gem->gem_handle;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SW_FINISH,
			    &sw_finish);
	} while (ret == -1 && errno == EINTR);

	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return 0;
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PWRITE,
			    &pwrite);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		ret = -errno;
		fprintf(stderr,
			"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return ret;
}

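/*
 * Example (illustrative sketch; the index array is an assumption):
 * uploading a small amount of data with pwrite instead of setting up a
 * CPU mapping of the whole object.
 *
 *	uint16_t indices[64];
 *	// ... fill indices ...
 *	drm_intel_bo_subdata(bo, 0, sizeof(indices), indices);
 */
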
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		    &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up.
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PREAD,
			    &pread);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		ret = -errno;
		fprintf(stderr,
			"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return ret;
}

/** Waits for all GPU rendering to the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 0);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr,
			"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			set_domain.read_domains, set_domain.write_domain,
			strerror(errno));
	}
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = 1;
		return -ENOMEM;
	}

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	/* Flag the target to disallow further relocations in it. */
	target_bo_gem->used_as_reloc_target = 1;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
	drm_intel_gem_bo_reference(target_bo);

	bo_gem->reloc_count++;

	return 0;
}

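/*
 * Example (illustrative sketch of the caller pattern; "batch", "used" and
 * "surf_offset" are assumptions): the caller writes the presumed address
 * into the batch, then records the relocation so the kernel can patch it
 * if target_bo has moved by execution time.
 *
 *	batch[used] = target_bo->offset + surf_offset;
 *	drm_intel_bo_emit_reloc(batch_bo, used * 4,
 *				target_bo, surf_offset,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */
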
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].
			    offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

Eric Anholtf9d98be2008-09-08 08:51:40 -07001294static int
Eric Anholt4b982642008-10-30 09:33:07 -07001295drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
Eric Anholtd70d6052009-10-06 12:40:42 -07001296 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001297{
Eric Anholtd70d6052009-10-06 12:40:42 -07001298 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
Chris Wilson792fed12009-12-02 13:12:39 +00001299 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholtd70d6052009-10-06 12:40:42 -07001300 struct drm_i915_gem_execbuffer execbuf;
1301 int ret, i;
Eric Anholtf9d98be2008-09-08 08:51:40 -07001302
Chris Wilson792fed12009-12-02 13:12:39 +00001303 if (bo_gem->has_error)
1304 return -ENOMEM;
1305
Eric Anholtd70d6052009-10-06 12:40:42 -07001306 pthread_mutex_lock(&bufmgr_gem->lock);
1307 /* Update indices and set up the validate list. */
1308 drm_intel_gem_bo_process_reloc(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07001309
Eric Anholtd70d6052009-10-06 12:40:42 -07001310 /* Add the batch buffer to the validation list. There are no
1311 * relocations pointing to it.
1312 */
1313 drm_intel_add_validate_buffer(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07001314
Eric Anholtd70d6052009-10-06 12:40:42 -07001315 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1316 execbuf.buffer_count = bufmgr_gem->exec_count;
1317 execbuf.batch_start_offset = 0;
1318 execbuf.batch_len = used;
1319 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1320 execbuf.num_cliprects = num_cliprects;
1321 execbuf.DR1 = 0;
1322 execbuf.DR4 = DR4;
Eric Anholtf9d98be2008-09-08 08:51:40 -07001323
Eric Anholtd70d6052009-10-06 12:40:42 -07001324 do {
Chris Wilson8ffd2e12009-12-01 13:08:04 +00001325 ret = ioctl(bufmgr_gem->fd,
1326 DRM_IOCTL_I915_GEM_EXECBUFFER,
Eric Anholtd70d6052009-10-06 12:40:42 -07001327 &execbuf);
Chris Wilsonb73612e2009-12-02 12:58:00 +00001328 } while (ret != 0 && errno == EINTR);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001329
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001330 if (ret != 0) {
1331 ret = -errno;
1332 if (errno == ENOSPC) {
1333 fprintf(stderr,
1334 "Execbuffer fails to pin. "
1335 "Estimate: %u. Actual: %u. Available: %u\n",
1336 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1337 bufmgr_gem->
1338 exec_count),
1339 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1340 bufmgr_gem->
1341 exec_count),
1342 (unsigned int)bufmgr_gem->gtt_size);
1343 }
Eric Anholtd70d6052009-10-06 12:40:42 -07001344 }
1345 drm_intel_update_buffer_offsets(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001346
Eric Anholtd70d6052009-10-06 12:40:42 -07001347 if (bufmgr_gem->bufmgr.debug)
1348 drm_intel_gem_dump_validation_list(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001349
Eric Anholtd70d6052009-10-06 12:40:42 -07001350 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1351 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1352 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001353
Eric Anholtd70d6052009-10-06 12:40:42 -07001354 /* Disconnect the buffer from the validate list */
1355 bo_gem->validate_index = -1;
Eric Anholtd70d6052009-10-06 12:40:42 -07001356 bufmgr_gem->exec_bos[i] = NULL;
1357 }
1358 bufmgr_gem->exec_count = 0;
1359 pthread_mutex_unlock(&bufmgr_gem->lock);
Eric Anholtf9d98be2008-09-08 08:51:40 -07001360
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001361 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001362}
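
/*
 * Usage sketch (illustrative only): submitting a finished batchbuffer via
 * the public drm_intel_bo_exec() entry point, which lands in
 * drm_intel_gem_bo_exec() above.  "used" is assumed to be the number of
 * bytes of valid batch data; the example also assumes no cliprects are
 * needed.
 */
static int
example_submit_batch(drm_intel_bo *batch_bo, int used)
{
        int ret;

        ret = drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);
        if (ret == -ENOSPC) {
                /* The kernel could not fit the referenced buffers in the
                 * aperture; a real caller would split its rendering into
                 * smaller batches and resubmit.
                 */
        }
        return ret;
}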

static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_pin pin;
        int ret;

        memset(&pin, 0, sizeof(pin));
        pin.handle = bo_gem->gem_handle;
        pin.alignment = alignment;

        do {
                ret = ioctl(bufmgr_gem->fd,
                            DRM_IOCTL_I915_GEM_PIN,
                            &pin);
        } while (ret == -1 && errno == EINTR);

        if (ret != 0)
                return -errno;

        bo->offset = pin.offset;
        return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_unpin unpin;
        int ret;

        memset(&unpin, 0, sizeof(unpin));
        unpin.handle = bo_gem->gem_handle;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
        if (ret != 0)
                return -errno;

        return 0;
}
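
/*
 * Usage sketch (illustrative only): pinning is meant for buffers the kernel
 * must not move, such as a scanout.  The 4096-byte alignment below is an
 * assumption made for the example, not a requirement stated by this file.
 */
static int
example_pin_scanout(drm_intel_bo *scanout_bo)
{
        int ret;

        ret = drm_intel_bo_pin(scanout_bo, 4096);
        if (ret != 0)
                return ret;

        /* scanout_bo->offset is now stable and can be handed to the
         * display hardware; drm_intel_bo_unpin() releases it once the
         * buffer is no longer being scanned out.
         */
        return 0;
}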

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t stride)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_tiling set_tiling;
        int ret;

        if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
                return 0;

        /* If we're going from non-tiling to tiling, bump fence count */
        if (bo_gem->tiling_mode == I915_TILING_NONE)
                bo_gem->reloc_tree_fences++;

        memset(&set_tiling, 0, sizeof(set_tiling));
        set_tiling.handle = bo_gem->gem_handle;
        set_tiling.tiling_mode = *tiling_mode;
        set_tiling.stride = stride;

        do {
                ret = ioctl(bufmgr_gem->fd,
                            DRM_IOCTL_I915_GEM_SET_TILING,
                            &set_tiling);
        } while (ret == -1 && errno == EINTR);
        if (ret != 0) {
                *tiling_mode = bo_gem->tiling_mode;
                return -errno;
        }
        bo_gem->tiling_mode = set_tiling.tiling_mode;
        bo_gem->swizzle_mode = set_tiling.swizzle_mode;

        /* If we're going from tiling to non-tiling, drop fence count */
        if (bo_gem->tiling_mode == I915_TILING_NONE)
                bo_gem->reloc_tree_fences--;

        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

        *tiling_mode = bo_gem->tiling_mode;
        return 0;
}
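
/*
 * Usage sketch (illustrative only): requesting X tiling for a surface.
 * set_tiling writes the mode the kernel actually chose back through
 * *tiling_mode, so the caller must re-check it afterwards.  The 512-byte
 * stride is an assumed value for the example.
 */
static int
example_make_x_tiled(drm_intel_bo *bo)
{
        uint32_t tiling_mode = I915_TILING_X;
        int ret;

        ret = drm_intel_bo_set_tiling(bo, &tiling_mode, 512);
        if (ret != 0)
                return ret;
        if (tiling_mode != I915_TILING_X) {
                /* The kernel refused X tiling; a real caller would fall
                 * back to a linear layout here.
                 */
        }
        return 0;
}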

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t *swizzle_mode)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        *tiling_mode = bo_gem->tiling_mode;
        *swizzle_mode = bo_gem->swizzle_mode;
        return 0;
}

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_gem_flink flink;
        int ret;

        if (!bo_gem->global_name) {
                memset(&flink, 0, sizeof(flink));
                flink.handle = bo_gem->gem_handle;

                ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
                if (ret != 0)
                        return -errno;
                bo_gem->global_name = flink.name;
                bo_gem->reusable = 0;
        }

        *name = bo_gem->global_name;
        return 0;
}
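
/*
 * Usage sketch (illustrative only): exporting a buffer's global name so a
 * second process can open the same object.  The importing side is shown in
 * a comment because it runs against that process's own bufmgr; "shared" is
 * an assumed debug name.
 */
static int
example_export_bo(drm_intel_bo *bo, uint32_t *name_out)
{
        /* After this, *name_out can be passed over any IPC channel.
         * The importer would then do:
         *
         *     drm_intel_bo *bo = drm_intel_bo_gem_create_from_name(
         *             bufmgr, "shared", name);
         */
        return drm_intel_bo_flink(bo, name_out);
}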

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

        bufmgr_gem->bo_reuse = 1;
}
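
/*
 * Usage sketch (illustrative only): reuse is off by default, so long-running
 * clients normally flip it on right after creating the buffer manager.  The
 * 16 KB batch size is an assumed value for the example.
 */
static drm_intel_bufmgr *
example_init_with_reuse(int fd)
{
        drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);

        if (bufmgr != NULL)
                drm_intel_bufmgr_gem_enable_reuse(bufmgr);
        return bufmgr;
}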

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;
        int total = 0;

        if (bo == NULL || bo_gem->included_in_check_aperture)
                return 0;

        total += bo->size;
        bo_gem->included_in_check_aperture = 1;

        for (i = 0; i < bo_gem->reloc_count; i++)
                total += drm_intel_gem_bo_get_aperture_space(
                        bo_gem->reloc_target_bo[i]);

        return total;
}

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

                if (bo_gem == NULL)
                        continue;

                total += bo_gem->reloc_tree_fences;
        }
        return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        if (bo == NULL || !bo_gem->included_in_check_aperture)
                return;

        bo_gem->included_in_check_aperture = 0;

        for (i = 0; i < bo_gem->reloc_count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(
                        bo_gem->reloc_target_bo[i]);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
                if (bo_gem != NULL)
                        total += bo_gem->reloc_tree_size;
        }
        return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
                /* For the first buffer object in the array, we get an
                 * accurate count back for its reloc_tree size (since nothing
                 * had been flagged as being counted yet).  We can save that
                 * value out as a more conservative reloc_tree_size that
                 * avoids double-counting target buffers.  Since the first
                 * buffer happens to usually be the batch buffer in our
                 * callers, this can pull us back from doing the tree
                 * walk on every new batch emit.
                 */
                if (i == 0) {
                        drm_intel_bo_gem *bo_gem =
                            (drm_intel_bo_gem *) bo_array[i];
                        bo_gem->reloc_tree_size = total;
                }
        }

        for (i = 0; i < count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
        return total;
}
/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
        drm_intel_bufmgr_gem *bufmgr_gem =
            (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
        unsigned int total = 0;
        unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
        int total_fences;

        /* Check for fence reg constraints if necessary */
        if (bufmgr_gem->available_fences) {
                total_fences = drm_intel_gem_total_fences(bo_array, count);
                if (total_fences > bufmgr_gem->available_fences)
                        return -ENOSPC;
        }

        total = drm_intel_gem_estimate_batch_space(bo_array, count);

        if (total > threshold)
                total = drm_intel_gem_compute_batch_space(bo_array, count);

        if (total > threshold) {
                DBG("check_space: overflowed available aperture, "
                    "%dkb vs %dkb\n",
                    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
                return -ENOSPC;
        } else {
                DBG("drm_check_space: total %dkb vs bufmgr %dkb\n",
                    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
                return 0;
        }
}
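
/*
 * Usage sketch (illustrative only): the intended caller pattern for the
 * check above, reached via the public drm_intel_bufmgr_check_aperture_space()
 * wrapper.  Before referencing another buffer from the batch, ask whether
 * everything would still fit; if not, flush what has accumulated first.
 * example_flush_batch() stands in for a hypothetical caller-side function.
 */
static void
example_add_to_batch(drm_intel_bo *batch_bo, drm_intel_bo *new_bo)
{
        drm_intel_bo *check[2];

        check[0] = batch_bo;
        check[1] = new_bo;
        if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
                /* example_flush_batch(); submit and start a new batch */
        }
        /* ...then emit state and relocations referencing new_bo. */
}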

/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        bo_gem->reusable = 0;
        return 0;
}
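
/*
 * Usage sketch (illustrative only): callers reach the function above through
 * the public drm_intel_bo_disable_reuse() wrapper.  The typical case is a
 * buffer handed to the kernel for scanout, where recycling it through the
 * bucket cache would hand out memory the display engine still reads.
 */
static void
example_mark_scanout_unreusable(drm_intel_bo *scanout_bo)
{
        drm_intel_bo_disable_reuse(scanout_bo);
}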

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_bo[i] == target_bo)
                        return 1;
                if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
                                                 target_bo))
                        return 1;
        }

        return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

        if (bo == NULL || target_bo == NULL)
                return 0;
        if (target_bo_gem->used_as_reloc_target)
                return _drm_intel_gem_bo_references(bo, target_bo);
        return 0;
}
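
/*
 * Usage sketch (illustrative only): bo_references answers whether a pending
 * batch already points at a buffer, which a caller can use to decide whether
 * overwriting that buffer's contents with the CPU is safe before the batch
 * has been flushed.
 */
static int
example_safe_to_write_with_cpu(drm_intel_bo *batch_bo, drm_intel_bo *data_bo)
{
        /* A CPU write is only safe if the pending batch does not reference
         * data_bo anywhere in its relocation tree.
         */
        return !drm_intel_bo_references(batch_bo, data_bo);
}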

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size in bytes of the batchbuffers the caller will use;
 *	determines how many relocation entries are allocated per buffer.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
        drm_intel_bufmgr_gem *bufmgr_gem;
        struct drm_i915_gem_get_aperture aperture;
        drm_i915_getparam_t gp;
        int ret, i;
        unsigned long size;

        bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
        if (bufmgr_gem == NULL)
                return NULL;

        bufmgr_gem->fd = fd;

        if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
                free(bufmgr_gem);
                return NULL;
        }

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE,
                    &aperture);

        if (ret == 0)
                bufmgr_gem->gtt_size = aperture.aper_available_size;
        else {
                fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
                        strerror(errno));
                bufmgr_gem->gtt_size = 128 * 1024 * 1024;
                fprintf(stderr, "Assuming %dkB available aperture size.\n"
                        "May lead to reduced performance or incorrect "
                        "rendering.\n",
                        (int)bufmgr_gem->gtt_size / 1024);
        }

        gp.param = I915_PARAM_CHIPSET_ID;
        gp.value = &bufmgr_gem->pci_device;
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret) {
                fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
                fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
        }

        if (!IS_I965G(bufmgr_gem)) {
                gp.param = I915_PARAM_NUM_FENCES_AVAIL;
                gp.value = &bufmgr_gem->available_fences;
                ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
                if (ret) {
                        fprintf(stderr, "get fences failed: %d [%d]\n", ret,
                                errno);
                        fprintf(stderr, "param: %d, val: %d\n", gp.param,
                                *gp.value);
                        bufmgr_gem->available_fences = 0;
                }
        }

        /* Let's go with one relocation per every 2 dwords (but round down a
         * bit since a power of two will mean an extra page allocation for
         * the reloc buffer).
         *
         * Every 4 was too few for the blender benchmark.
         */
        bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

        bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
        bufmgr_gem->bufmgr.bo_alloc_for_render =
            drm_intel_gem_bo_alloc_for_render;
        bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
        bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
        bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
        bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
        bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
        bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
        bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
        bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
        bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
        bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
        bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
        bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
        bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
        bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
        bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
        bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
        bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
        bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
        bufmgr_gem->bufmgr.debug = 0;
        bufmgr_gem->bufmgr.check_aperture_space =
            drm_intel_gem_check_aperture_space;
        bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
        bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
            drm_intel_gem_get_pipe_from_crtc_id;
        bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

        /* Initialize the linked lists for BO reuse cache. */
        for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS;
             i++, size *= 2) {
                DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
                bufmgr_gem->cache_bucket[i].size = size;
        }

        return &bufmgr_gem->bufmgr;
}
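
/*
 * End-to-end usage sketch (illustrative only): open a device, bring up the
 * buffer manager, allocate and clear a buffer, then tear everything down.
 * The device path, batch size, buffer name, and sizes are assumptions made
 * for the example; error handling is abbreviated.
 */
static int
example_bringup(const char *device_path)
{
        drm_intel_bufmgr *bufmgr;
        drm_intel_bo *bo;
        int fd = open(device_path, O_RDWR);

        if (fd < 0)
                return -errno;

        bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
        if (bufmgr == NULL) {
                close(fd);
                return -ENOMEM;
        }
        drm_intel_bufmgr_gem_enable_reuse(bufmgr);

        bo = drm_intel_bo_alloc(bufmgr, "example", 64 * 1024, 4096);
        if (bo != NULL) {
                if (drm_intel_bo_map(bo, 1) == 0) {
                        memset(bo->virtual, 0, 64 * 1024);
                        drm_intel_bo_unmap(bo);
                }
                drm_intel_bo_unreference(bo);
        }

        drm_intel_bufmgr_destroy(bufmgr);
        close(fd);
        return 0;
}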