/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead named;
	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int bo_reuse : 1;
	bool fenced_relocs;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;
	drmMMListHead name_list;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Flag for whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						       time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

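/*
 * Rounds a requested allocation size up to something the tiling/fence
 * hardware can use: page multiples on gen4+ (or with relaxed fencing),
 * otherwise a power-of-two size with a per-generation minimum.  Objects
 * too large to fence fall back to untiled.
 */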
static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

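/*
 * Returns the smallest cache bucket that can hold @size, or NULL if the
 * request is larger than any bucket (in which case the allocation is
 * neither rounded up nor cached).
 */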
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

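/*
 * Debug helper: prints every buffer on the current exec list along with
 * its relocation entries, via the DBG macro.
 */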
static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

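/*
 * execbuffer2 variant of drm_intel_add_validate_buffer(): adds the buffer
 * to the exec2 object list and, when requested, flags it as needing a
 * fence register (EXEC_OBJECT_NEEDS_FENCE).
 */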
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1) {
		if (need_fence)
			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
				EXEC_OBJECT_NEEDS_FENCE;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	if (need_fence) {
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	}
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

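/*
 * Records the worst-case aperture footprint of this BO for later use by
 * drm_intel_bufmgr_check_aperture_space(); pre-gen4 tiled buffers may
 * need up to twice their fence-aligned size to fit.
 */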
static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture.  Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		size = 2 * min_size;
	}

	bo_gem->reloc_tree_size = size;
}

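/*
 * Lazily allocates the relocation and relocation-target arrays for a BO,
 * capped at one relocation per 4 bytes of buffer.  On allocation failure
 * the BO is marked with has_error and 1 is returned.
 */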
static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

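/*
 * Asks the kernel (DRM_IOCTL_I915_GEM_BUSY) whether the BO is still
 * referenced by the GPU; returns non-zero while it is busy.
 */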
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	return (ret == 0 && busy.busy);
}

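/*
 * Tells the kernel that an unused cached BO's backing pages may be
 * discarded under memory pressure (I915_MADV_DONTNEED) or are needed
 * again (I915_MADV_WILLNEED).  Returns whether the pages are still
 * resident.
 */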
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

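/*
 * Common allocation path: tries to reuse a suitably sized BO from the
 * matching cache bucket (the MRU entry for render targets, otherwise an
 * idle entry from the front of the list), and falls back to
 * DRM_IOCTL_I915_GEM_CREATE when nothing can be reused.
 */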
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}

		DRMINITLISTHEAD(&bo_gem->name_list);
		DRMINITLISTHEAD(&bo_gem->vma_list);
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);
}

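/*
 * Allocates a 2D buffer: iterates until the requested tiling mode is
 * stable, computing a tile-aligned pitch and size, then delegates to
 * drm_intel_gem_bo_alloc_internal().  The chosen pitch is returned
 * through *pitch.
 */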
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			 || (IS_915(bufmgr_gem->pci_device)
			     && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride);
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bo.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->global_name == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

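/*
 * Releases a BO for real: tears down any CPU/GTT mappings still cached
 * for it and closes the GEM handle with DRM_IOCTL_GEM_CLOSE.
 */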
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

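/*
 * Trims the cache of idle CPU/GTT mappings so that the total number of
 * VMAs stays under vma_max, keeping some headroom relative to the number
 * of mappings that are currently open.
 */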
static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}

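/*
 * VMA-cache bookkeeping: close_vma parks a no-longer-mapped BO's mappings
 * on the cache list, open_vma takes them back off; both then trim the
 * cache as needed.
 */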
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

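/*
 * Drops the last reference: releases relocation state and any left-over
 * mapping, then either parks the BO in its cache bucket for reuse or
 * frees it outright.
 */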
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
	}

	DRMLISTDEL(&bo_gem->name_list);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

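/*
 * Reference-count decrements: the _locked_timed variant assumes the
 * bufmgr lock is already held and reuses the caller's timestamp; the
 * plain unreference takes the lock itself and also ages out the BO cache.
 */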
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						       time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

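/*
 * Maps the BO through the CPU mmap path (DRM_IOCTL_I915_GEM_MMAP) and
 * moves it to the CPU domain; the mapping is cached in mem_virtual across
 * map/unmap cycles.
 */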
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

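/*
 * Maps the BO through the GTT: gets the fake mmap offset via
 * DRM_IOCTL_I915_GEM_MMAP_GTT, mmaps it, and moves the BO to the GTT
 * domain so the CPU caches are flushed.  The mapping is cached in
 * gtt_virtual.
 */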
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

Eric Anholtd70d6052009-10-06 12:40:42 -07001231static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1232{
1233 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1234 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1235 struct drm_i915_gem_sw_finish sw_finish;
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001236 int ret = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07001237
1238 if (bo == NULL)
1239 return 0;
1240
Chris Wilsona3305b02010-05-13 08:24:28 +01001241 pthread_mutex_lock(&bufmgr_gem->lock);
1242
Chris Wilson015286f2011-12-11 17:35:06 +00001243 if (bo_gem->map_count <= 0) {
1244 DBG("attempted to unmap an unmapped bo\n");
1245 pthread_mutex_unlock(&bufmgr_gem->lock);
1246 /* Preserve the old behaviour of just treating this as a
1247 * no-op rather than reporting the error.
1248 */
1249 return 0;
1250 }
Chris Wilsone4b60f22011-12-05 21:29:05 +00001251
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001252 if (bo_gem->mapped_cpu_write) {
1253 /* Cause a flush to happen if the buffer's pinned for
1254 * scanout, so the results show up in a timely manner.
1255 * Unlike GTT set domains, this only does work if the
	1256		 * buffer is scanout-related.
1257 */
1258 sw_finish.handle = bo_gem->gem_handle;
1259 ret = drmIoctl(bufmgr_gem->fd,
1260 DRM_IOCTL_I915_GEM_SW_FINISH,
1261 &sw_finish);
1262 ret = ret == -1 ? -errno : 0;
1263
1264 bo_gem->mapped_cpu_write = false;
1265 }
Eric Anholtd70d6052009-10-06 12:40:42 -07001266
Chris Wilsonc549a772011-12-05 10:14:34 +00001267	/* We need to unmap after every invocation, as we cannot track
	1268	 * an open vma for every bo: that would exhaust the system
1269 * limits and cause later failures.
1270 */
1271 if (--bo_gem->map_count == 0) {
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001272 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilsonc549a772011-12-05 10:14:34 +00001273 bo->virtual = NULL;
1274 }
Chris Wilsona3305b02010-05-13 08:24:28 +01001275 pthread_mutex_unlock(&bufmgr_gem->lock);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001276
1277 return ret;
Carl Worthafd245d2009-04-29 14:43:55 -07001278}
1279
Eric Anholtd0ae6832011-10-28 13:13:08 -07001280int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1281{
1282 return drm_intel_gem_bo_unmap(bo);
1283}
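
/*
 * Illustrative sketch (not part of this file): how a caller typically uses
 * the GTT mapping path above.  "pixels" and "size" are hypothetical
 * caller-side values; error handling is omitted for brevity.
 *
 *	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
 *		memcpy(bo->virtual, pixels, size);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 */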
1284
Eric Anholt6a9eb082008-06-03 09:27:37 -07001285static int
Eric Anholtd70d6052009-10-06 12:40:42 -07001286drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1287 unsigned long size, const void *data)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001288{
Eric Anholtd70d6052009-10-06 12:40:42 -07001289 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1290 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1291 struct drm_i915_gem_pwrite pwrite;
1292 int ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001293
Eric Anholtd70d6052009-10-06 12:40:42 -07001294 memset(&pwrite, 0, sizeof(pwrite));
1295 pwrite.handle = bo_gem->gem_handle;
1296 pwrite.offset = offset;
1297 pwrite.size = size;
1298 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
Chris Wilson62997222010-09-25 21:32:59 +01001299 ret = drmIoctl(bufmgr_gem->fd,
1300 DRM_IOCTL_I915_GEM_PWRITE,
1301 &pwrite);
Eric Anholtd70d6052009-10-06 12:40:42 -07001302 if (ret != 0) {
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001303 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001304 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1305 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1306 (int)size, strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001307 }
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001308
1309 return ret;
Eric Anholtd70d6052009-10-06 12:40:42 -07001310}
1311
1312static int
1313drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1314{
1315 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1316 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1317 int ret;
1318
1319 get_pipe_from_crtc_id.crtc_id = crtc_id;
Chris Wilson62997222010-09-25 21:32:59 +01001320 ret = drmIoctl(bufmgr_gem->fd,
1321 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1322 &get_pipe_from_crtc_id);
Eric Anholtd70d6052009-10-06 12:40:42 -07001323 if (ret != 0) {
1324 /* We return -1 here to signal that we don't
1325 * know which pipe is associated with this crtc.
1326 * This lets the caller know that this information
1327 * isn't available; using the wrong pipe for
1328 * vblank waiting can cause the chipset to lock up
1329 */
1330 return -1;
1331 }
1332
1333 return get_pipe_from_crtc_id.pipe;
1334}
1335
1336static int
1337drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1338 unsigned long size, void *data)
1339{
1340 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1341 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1342 struct drm_i915_gem_pread pread;
1343 int ret;
1344
1345 memset(&pread, 0, sizeof(pread));
1346 pread.handle = bo_gem->gem_handle;
1347 pread.offset = offset;
1348 pread.size = size;
1349 pread.data_ptr = (uint64_t) (uintptr_t) data;
Chris Wilson62997222010-09-25 21:32:59 +01001350 ret = drmIoctl(bufmgr_gem->fd,
1351 DRM_IOCTL_I915_GEM_PREAD,
1352 &pread);
Eric Anholtd70d6052009-10-06 12:40:42 -07001353 if (ret != 0) {
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001354 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001355 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1356 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1357 (int)size, strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001358 }
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001359
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001360 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001361}
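
/*
 * Illustrative sketch (not part of this file): writing and reading back a
 * small amount of data through the pwrite/pread paths above, without ever
 * mapping the buffer.  "bo" is a hypothetical, already-allocated buffer
 * object.
 *
 *	uint32_t data[4] = { 1, 2, 3, 4 }, check[4];
 *
 *	drm_intel_bo_subdata(bo, 0, sizeof(data), data);
 *	drm_intel_bo_get_subdata(bo, 0, sizeof(check), check);
 */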
1362
Eric Anholt877b2ce2010-11-09 13:51:45 -08001363/** Waits for all GPU rendering with the object to have completed. */
Eric Anholt6a9eb082008-06-03 09:27:37 -07001364static void
Eric Anholt4b982642008-10-30 09:33:07 -07001365drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001366{
Eric Anholt877b2ce2010-11-09 13:51:45 -08001367 drm_intel_gem_bo_start_gtt_access(bo, 1);
Eric Anholt6fb1ad72008-11-13 11:44:22 -08001368}
1369
1370/**
1371 * Sets the object to the GTT read and possibly write domain, used by the X
1372 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1373 *
1374 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1375 * can do tiled pixmaps this way.
1376 */
1377void
1378drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1379{
Eric Anholtd70d6052009-10-06 12:40:42 -07001380 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1381 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1382 struct drm_i915_gem_set_domain set_domain;
1383 int ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001384
Eric Anholtd70d6052009-10-06 12:40:42 -07001385 set_domain.handle = bo_gem->gem_handle;
1386 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1387 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
Chris Wilson62997222010-09-25 21:32:59 +01001388 ret = drmIoctl(bufmgr_gem->fd,
1389 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1390 &set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001391 if (ret != 0) {
Chris Wilson96214862010-10-01 16:50:09 +01001392 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1393 __FILE__, __LINE__, bo_gem->gem_handle,
1394 set_domain.read_domains, set_domain.write_domain,
1395 strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001396 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001397}
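
/*
 * Illustrative sketch (not part of this file): the pinned-scanout usage
 * described in the comment above: pin the buffer, then flag GTT access
 * before each burst of CPU writes.  The 4096 alignment is an assumption for
 * the example.
 *
 *	drm_intel_bo_pin(bo, 4096);
 *	drm_intel_gem_bo_start_gtt_access(bo, 1);
 *
 * after which the CPU can write to the pinned buffer through a GTT mapping.
 */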
1398
1399static void
Eric Anholt4b982642008-10-30 09:33:07 -07001400drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001401{
Eric Anholtd70d6052009-10-06 12:40:42 -07001402 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1403 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001404
Jesse Barnesb5096402009-09-15 11:02:58 -07001405 free(bufmgr_gem->exec2_objects);
Eric Anholtd70d6052009-10-06 12:40:42 -07001406 free(bufmgr_gem->exec_objects);
1407 free(bufmgr_gem->exec_bos);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001408
Eric Anholtd70d6052009-10-06 12:40:42 -07001409 pthread_mutex_destroy(&bufmgr_gem->lock);
Eric Anholt6df7b072008-06-12 23:22:26 -07001410
Eric Anholtd70d6052009-10-06 12:40:42 -07001411 /* Free any cached buffer objects we were going to reuse */
Eric Anholt0ec768e2010-06-04 17:09:11 -07001412 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
Eric Anholtd70d6052009-10-06 12:40:42 -07001413 struct drm_intel_gem_bo_bucket *bucket =
1414 &bufmgr_gem->cache_bucket[i];
1415 drm_intel_bo_gem *bo_gem;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001416
Eric Anholtd70d6052009-10-06 12:40:42 -07001417 while (!DRMLISTEMPTY(&bucket->head)) {
1418 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1419 bucket->head.next, head);
1420 DRMLISTDEL(&bo_gem->head);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001421
Eric Anholtd70d6052009-10-06 12:40:42 -07001422 drm_intel_gem_bo_free(&bo_gem->bo);
1423 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001424 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001425
Eric Anholtd70d6052009-10-06 12:40:42 -07001426 free(bufmgr);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001427}
1428
1429/**
1430 * Adds the target buffer to the validation list and adds the relocation
1431 * to the reloc_buffer's relocation list.
1432 *
1433 * The relocation entry at the given offset must already contain the
1434 * precomputed relocation value, because the kernel will optimize out
1435 * the relocation entry write when the buffer hasn't moved from the
1436 * last known offset in target_bo.
1437 */
1438static int
Jesse Barnesb5096402009-09-15 11:02:58 -07001439do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1440 drm_intel_bo *target_bo, uint32_t target_offset,
1441 uint32_t read_domains, uint32_t write_domain,
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001442 bool need_fence)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001443{
Eric Anholtd70d6052009-10-06 12:40:42 -07001444 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1445 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1446 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001447 bool fenced_command;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001448
Chris Wilson97077332009-12-01 23:01:34 +00001449 if (bo_gem->has_error)
Chris Wilson792fed12009-12-02 13:12:39 +00001450 return -ENOMEM;
Chris Wilson792fed12009-12-02 13:12:39 +00001451
1452 if (target_bo_gem->has_error) {
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001453 bo_gem->has_error = true;
Chris Wilson792fed12009-12-02 13:12:39 +00001454 return -ENOMEM;
1455 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001456
Jesse Barnesb5096402009-09-15 11:02:58 -07001457 /* We never use HW fences for rendering on 965+ */
Eric Anholta1f9ea72010-03-02 08:49:36 -08001458 if (bufmgr_gem->gen >= 4)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001459 need_fence = false;
Jesse Barnesb5096402009-09-15 11:02:58 -07001460
Chris Wilson537703f2010-12-07 20:34:22 +00001461 fenced_command = need_fence;
1462 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001463 need_fence = false;
Chris Wilson537703f2010-12-07 20:34:22 +00001464
Eric Anholtd70d6052009-10-06 12:40:42 -07001465 /* Create a new relocation list if needed */
Chris Wilson97077332009-12-01 23:01:34 +00001466 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
Chris Wilson792fed12009-12-02 13:12:39 +00001467 return -ENOMEM;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001468
Eric Anholtd70d6052009-10-06 12:40:42 -07001469 /* Check overflow */
1470 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001471
Eric Anholtd70d6052009-10-06 12:40:42 -07001472 /* Check args */
1473 assert(offset <= bo->size - 4);
1474 assert((write_domain & (write_domain - 1)) == 0);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001475
Eric Anholtd70d6052009-10-06 12:40:42 -07001476 /* Make sure that we're not adding a reloc to something whose size has
1477 * already been accounted for.
1478 */
1479 assert(!bo_gem->used_as_reloc_target);
Eric Anholtf1791372010-06-07 14:22:36 -07001480 if (target_bo_gem != bo_gem) {
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001481 target_bo_gem->used_as_reloc_target = true;
Eric Anholtf1791372010-06-07 14:22:36 -07001482 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1483 }
Eric Anholta1f9ea72010-03-02 08:49:36 -08001484 /* An object needing a fence is a tiled buffer, so it won't have
Jesse Barnesb5096402009-09-15 11:02:58 -07001485 * relocs to other buffers.
1486 */
1487 if (need_fence)
1488 target_bo_gem->reloc_tree_fences = 1;
Eric Anholtd70d6052009-10-06 12:40:42 -07001489 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
Eric Anholt0e867312008-10-21 00:10:54 -07001490
Eric Anholtd70d6052009-10-06 12:40:42 -07001491 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1492 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1493 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1494 target_bo_gem->gem_handle;
1495 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1496 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1497 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001498
Jesse Barnesb5096402009-09-15 11:02:58 -07001499 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
Eric Anholt4f7704a2010-06-10 08:58:08 -07001500 if (target_bo != bo)
1501 drm_intel_gem_bo_reference(target_bo);
Chris Wilsonaf3d2822010-12-03 10:48:12 +00001502 if (fenced_command)
Jesse Barnesb5096402009-09-15 11:02:58 -07001503 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1504 DRM_INTEL_RELOC_FENCE;
1505 else
1506 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001507
Eric Anholtd70d6052009-10-06 12:40:42 -07001508 bo_gem->reloc_count++;
Eric Anholt6df7b072008-06-12 23:22:26 -07001509
Eric Anholtd70d6052009-10-06 12:40:42 -07001510 return 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001511}
1512
Jesse Barnesb5096402009-09-15 11:02:58 -07001513static int
1514drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1515 drm_intel_bo *target_bo, uint32_t target_offset,
1516 uint32_t read_domains, uint32_t write_domain)
1517{
1518 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1519
1520 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1521 read_domains, write_domain,
1522 !bufmgr_gem->fenced_relocs);
1523}
1524
1525static int
1526drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1527 drm_intel_bo *target_bo,
1528 uint32_t target_offset,
1529 uint32_t read_domains, uint32_t write_domain)
1530{
1531 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001532 read_domains, write_domain, true);
Jesse Barnesb5096402009-09-15 11:02:58 -07001533}
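
/*
 * Illustrative sketch (not part of this file): emitting a relocation from a
 * batchbuffer to a render target.  Per the comment above do_bo_emit_reloc(),
 * the presumed value is written into the batch first so the kernel can skip
 * the fixup when the target has not moved.  "batch_bo", "target_bo", "n" and
 * "target_delta" are hypothetical caller-side names.
 *
 *	uint32_t *batch = batch_bo->virtual;
 *
 *	batch[n] = target_bo->offset + target_delta;
 *	drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *				target_bo, target_delta,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */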
1534
Eric Anholt515cea62011-10-21 18:48:20 -07001535int
1536drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1537{
1538 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1539
1540 return bo_gem->reloc_count;
1541}
1542
1543/**
1544 * Removes existing relocation entries in the BO after "start".
1545 *
	1546	 * This allows a user to avoid the two-step state-setup process of
1547 * counting up all the buffer objects and doing a
1548 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1549 * relocations for the state setup. Instead, save the state of the
	1550	 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
1551 * state, and then check if it still fits in the aperture.
1552 *
1553 * Any further drm_intel_bufmgr_check_aperture_space() queries
1554 * involving this buffer in the tree are undefined after this call.
1555 */
1556void
1557drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1558{
1559 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1560 int i;
1561 struct timespec time;
1562
1563 clock_gettime(CLOCK_MONOTONIC, &time);
1564
1565 assert(bo_gem->reloc_count >= start);
1566 /* Unreference the cleared target buffers */
1567 for (i = start; i < bo_gem->reloc_count; i++) {
1568 if (bo_gem->reloc_target_info[i].bo != bo) {
1569 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1570 reloc_target_info[i].bo,
1571 time.tv_sec);
1572 }
1573 }
1574 bo_gem->reloc_count = start;
1575}
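
/*
 * Illustrative sketch (not part of this file): the single-pass state setup
 * described in the comment above: remember the reloc count, emit state
 * speculatively, and roll the relocations back if the batch no longer fits.
 * emit_state() is a hypothetical caller function.
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 */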
1576
Eric Anholt6a9eb082008-06-03 09:27:37 -07001577/**
1578 * Walk the tree of relocations rooted at BO and accumulate the list of
1579 * validations to be performed and update the relocation buffers with
1580 * index values into the validation list.
1581 */
1582static void
Eric Anholt4b982642008-10-30 09:33:07 -07001583drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001584{
Eric Anholtd70d6052009-10-06 12:40:42 -07001585 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1586 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001587
Eric Anholtd70d6052009-10-06 12:40:42 -07001588 if (bo_gem->relocs == NULL)
1589 return;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001590
Eric Anholtd70d6052009-10-06 12:40:42 -07001591 for (i = 0; i < bo_gem->reloc_count; i++) {
Jesse Barnesb5096402009-09-15 11:02:58 -07001592 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001593
Eric Anholtf1791372010-06-07 14:22:36 -07001594 if (target_bo == bo)
1595 continue;
1596
Eric Anholtd70d6052009-10-06 12:40:42 -07001597 /* Continue walking the tree depth-first. */
1598 drm_intel_gem_bo_process_reloc(target_bo);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001599
Eric Anholtd70d6052009-10-06 12:40:42 -07001600 /* Add the target to the validate list */
1601 drm_intel_add_validate_buffer(target_bo);
1602 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001603}
1604
Eric Anholt6a9eb082008-06-03 09:27:37 -07001605static void
Jesse Barnesb5096402009-09-15 11:02:58 -07001606drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1607{
1608 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1609 int i;
1610
1611 if (bo_gem->relocs == NULL)
1612 return;
1613
1614 for (i = 0; i < bo_gem->reloc_count; i++) {
1615 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1616 int need_fence;
1617
Eric Anholtf1791372010-06-07 14:22:36 -07001618 if (target_bo == bo)
1619 continue;
1620
Jesse Barnesb5096402009-09-15 11:02:58 -07001621 /* Continue walking the tree depth-first. */
1622 drm_intel_gem_bo_process_reloc2(target_bo);
1623
1624 need_fence = (bo_gem->reloc_target_info[i].flags &
1625 DRM_INTEL_RELOC_FENCE);
1626
1627 /* Add the target to the validate list */
1628 drm_intel_add_validate_buffer2(target_bo, need_fence);
1629 }
1630}
1631
1632
1633static void
Eric Anholtd70d6052009-10-06 12:40:42 -07001634drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001635{
Eric Anholtd70d6052009-10-06 12:40:42 -07001636 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001637
Eric Anholtd70d6052009-10-06 12:40:42 -07001638 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1639 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1640 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001641
Eric Anholtd70d6052009-10-06 12:40:42 -07001642 /* Update the buffer offset */
1643 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1644 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1645 bo_gem->gem_handle, bo_gem->name, bo->offset,
1646 (unsigned long long)bufmgr_gem->exec_objects[i].
1647 offset);
1648 bo->offset = bufmgr_gem->exec_objects[i].offset;
1649 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001650 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001651}
1652
Jesse Barnesb5096402009-09-15 11:02:58 -07001653static void
1654drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1655{
1656 int i;
1657
1658 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1659 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1660 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1661
1662 /* Update the buffer offset */
1663 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1664 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1665 bo_gem->gem_handle, bo_gem->name, bo->offset,
1666 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1667 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1668 }
1669 }
1670}
1671
Eric Anholtf9d98be2008-09-08 08:51:40 -07001672static int
Eric Anholt4b982642008-10-30 09:33:07 -07001673drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
Eric Anholtd70d6052009-10-06 12:40:42 -07001674 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001675{
Eric Anholtd70d6052009-10-06 12:40:42 -07001676 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
Chris Wilson792fed12009-12-02 13:12:39 +00001677 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholtd70d6052009-10-06 12:40:42 -07001678 struct drm_i915_gem_execbuffer execbuf;
1679 int ret, i;
Eric Anholtf9d98be2008-09-08 08:51:40 -07001680
Chris Wilson792fed12009-12-02 13:12:39 +00001681 if (bo_gem->has_error)
1682 return -ENOMEM;
1683
Eric Anholtd70d6052009-10-06 12:40:42 -07001684 pthread_mutex_lock(&bufmgr_gem->lock);
1685 /* Update indices and set up the validate list. */
1686 drm_intel_gem_bo_process_reloc(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07001687
Eric Anholtd70d6052009-10-06 12:40:42 -07001688 /* Add the batch buffer to the validation list. There are no
1689 * relocations pointing to it.
1690 */
1691 drm_intel_add_validate_buffer(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07001692
Eric Anholtd70d6052009-10-06 12:40:42 -07001693 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1694 execbuf.buffer_count = bufmgr_gem->exec_count;
1695 execbuf.batch_start_offset = 0;
1696 execbuf.batch_len = used;
1697 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1698 execbuf.num_cliprects = num_cliprects;
1699 execbuf.DR1 = 0;
1700 execbuf.DR4 = DR4;
Eric Anholtf9d98be2008-09-08 08:51:40 -07001701
Chris Wilson62997222010-09-25 21:32:59 +01001702 ret = drmIoctl(bufmgr_gem->fd,
1703 DRM_IOCTL_I915_GEM_EXECBUFFER,
1704 &execbuf);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001705 if (ret != 0) {
1706 ret = -errno;
1707 if (errno == ENOSPC) {
Chris Wilson96214862010-10-01 16:50:09 +01001708 DBG("Execbuffer fails to pin. "
1709 "Estimate: %u. Actual: %u. Available: %u\n",
1710 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1711 bufmgr_gem->
1712 exec_count),
1713 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1714 bufmgr_gem->
1715 exec_count),
1716 (unsigned int)bufmgr_gem->gtt_size);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001717 }
Eric Anholtd70d6052009-10-06 12:40:42 -07001718 }
1719 drm_intel_update_buffer_offsets(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001720
Eric Anholtd70d6052009-10-06 12:40:42 -07001721 if (bufmgr_gem->bufmgr.debug)
1722 drm_intel_gem_dump_validation_list(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001723
Eric Anholtd70d6052009-10-06 12:40:42 -07001724 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1725 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1726 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001727
Eric Anholtd70d6052009-10-06 12:40:42 -07001728 /* Disconnect the buffer from the validate list */
1729 bo_gem->validate_index = -1;
Eric Anholtd70d6052009-10-06 12:40:42 -07001730 bufmgr_gem->exec_bos[i] = NULL;
1731 }
1732 bufmgr_gem->exec_count = 0;
1733 pthread_mutex_unlock(&bufmgr_gem->lock);
Eric Anholtf9d98be2008-09-08 08:51:40 -07001734
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001735 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001736}
1737
Keith Packard8e41ce12008-08-04 00:34:08 -07001738static int
Zou Nan hai66375fd2010-06-02 10:07:37 +08001739drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
1740 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
Chris Wilson0184bb12010-12-19 13:01:15 +00001741 unsigned int flags)
Jesse Barnesb5096402009-09-15 11:02:58 -07001742{
1743 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1744 struct drm_i915_gem_execbuffer2 execbuf;
1745 int ret, i;
1746
Chris Wilson0184bb12010-12-19 13:01:15 +00001747 switch (flags & 0x7) {
Chris Wilson057fab32010-10-26 11:35:11 +01001748 default:
Zou Nan hai66375fd2010-06-02 10:07:37 +08001749 return -EINVAL;
Chris Wilson057fab32010-10-26 11:35:11 +01001750 case I915_EXEC_BLT:
1751 if (!bufmgr_gem->has_blt)
1752 return -EINVAL;
1753 break;
1754 case I915_EXEC_BSD:
1755 if (!bufmgr_gem->has_bsd)
1756 return -EINVAL;
1757 break;
1758 case I915_EXEC_RENDER:
1759 case I915_EXEC_DEFAULT:
1760 break;
1761 }
Zou Nan hai66375fd2010-06-02 10:07:37 +08001762
Jesse Barnesb5096402009-09-15 11:02:58 -07001763 pthread_mutex_lock(&bufmgr_gem->lock);
1764 /* Update indices and set up the validate list. */
1765 drm_intel_gem_bo_process_reloc2(bo);
1766
1767 /* Add the batch buffer to the validation list. There are no relocations
1768 * pointing to it.
1769 */
1770 drm_intel_add_validate_buffer2(bo, 0);
1771
1772 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1773 execbuf.buffer_count = bufmgr_gem->exec_count;
1774 execbuf.batch_start_offset = 0;
1775 execbuf.batch_len = used;
1776 execbuf.cliprects_ptr = (uintptr_t)cliprects;
1777 execbuf.num_cliprects = num_cliprects;
1778 execbuf.DR1 = 0;
1779 execbuf.DR4 = DR4;
Chris Wilson0184bb12010-12-19 13:01:15 +00001780 execbuf.flags = flags;
Jesse Barnesb5096402009-09-15 11:02:58 -07001781 execbuf.rsvd1 = 0;
1782 execbuf.rsvd2 = 0;
1783
Chris Wilson62997222010-09-25 21:32:59 +01001784 ret = drmIoctl(bufmgr_gem->fd,
1785 DRM_IOCTL_I915_GEM_EXECBUFFER2,
1786 &execbuf);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001787 if (ret != 0) {
1788 ret = -errno;
Chris Wilson13e82702010-06-21 15:38:06 +01001789 if (ret == -ENOSPC) {
Chris Wilson96214862010-10-01 16:50:09 +01001790 DBG("Execbuffer fails to pin. "
1791 "Estimate: %u. Actual: %u. Available: %u\n",
1792 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1793 bufmgr_gem->exec_count),
1794 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1795 bufmgr_gem->exec_count),
1796 (unsigned int) bufmgr_gem->gtt_size);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001797 }
Jesse Barnesb5096402009-09-15 11:02:58 -07001798 }
1799 drm_intel_update_buffer_offsets2(bufmgr_gem);
1800
1801 if (bufmgr_gem->bufmgr.debug)
1802 drm_intel_gem_dump_validation_list(bufmgr_gem);
1803
1804 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1805 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1806 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1807
1808 /* Disconnect the buffer from the validate list */
1809 bo_gem->validate_index = -1;
1810 bufmgr_gem->exec_bos[i] = NULL;
1811 }
1812 bufmgr_gem->exec_count = 0;
1813 pthread_mutex_unlock(&bufmgr_gem->lock);
1814
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001815 return ret;
Jesse Barnesb5096402009-09-15 11:02:58 -07001816}
1817
1818static int
Zou Nan hai66375fd2010-06-02 10:07:37 +08001819drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1820 drm_clip_rect_t *cliprects, int num_cliprects,
1821 int DR4)
1822{
1823 return drm_intel_gem_bo_mrb_exec2(bo, used,
1824 cliprects, num_cliprects, DR4,
1825 I915_EXEC_RENDER);
1826}
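
/*
 * Illustrative sketch (not part of this file): submitting a batchbuffer
 * through the public entry point, which lands in the exec/exec2 paths above.
 * "used" is the number of batch bytes the caller actually emitted.
 *
 *	drm_intel_bo_unmap(batch_bo);
 *	drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);
 */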
1827
1828static int
Eric Anholt4b982642008-10-30 09:33:07 -07001829drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
Keith Packard8e41ce12008-08-04 00:34:08 -07001830{
Eric Anholtd70d6052009-10-06 12:40:42 -07001831 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1832 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1833 struct drm_i915_gem_pin pin;
1834 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07001835
Eric Anholtd70d6052009-10-06 12:40:42 -07001836 memset(&pin, 0, sizeof(pin));
1837 pin.handle = bo_gem->gem_handle;
1838 pin.alignment = alignment;
Keith Packard8e41ce12008-08-04 00:34:08 -07001839
Chris Wilson62997222010-09-25 21:32:59 +01001840 ret = drmIoctl(bufmgr_gem->fd,
1841 DRM_IOCTL_I915_GEM_PIN,
1842 &pin);
Eric Anholtd70d6052009-10-06 12:40:42 -07001843 if (ret != 0)
1844 return -errno;
Keith Packard8e41ce12008-08-04 00:34:08 -07001845
Eric Anholtd70d6052009-10-06 12:40:42 -07001846 bo->offset = pin.offset;
1847 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07001848}
1849
1850static int
Eric Anholt4b982642008-10-30 09:33:07 -07001851drm_intel_gem_bo_unpin(drm_intel_bo *bo)
Keith Packard8e41ce12008-08-04 00:34:08 -07001852{
Eric Anholtd70d6052009-10-06 12:40:42 -07001853 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1854 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1855 struct drm_i915_gem_unpin unpin;
1856 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07001857
Eric Anholtd70d6052009-10-06 12:40:42 -07001858 memset(&unpin, 0, sizeof(unpin));
1859 unpin.handle = bo_gem->gem_handle;
Keith Packard8e41ce12008-08-04 00:34:08 -07001860
Chris Wilson62997222010-09-25 21:32:59 +01001861 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
Eric Anholtd70d6052009-10-06 12:40:42 -07001862 if (ret != 0)
1863 return -errno;
Keith Packard8e41ce12008-08-04 00:34:08 -07001864
Eric Anholtd70d6052009-10-06 12:40:42 -07001865 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07001866}
1867
1868static int
Chris Wilson1db22ff2010-06-21 14:27:23 +01001869drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
1870 uint32_t tiling_mode,
1871 uint32_t stride)
Keith Packard8e41ce12008-08-04 00:34:08 -07001872{
Eric Anholtd70d6052009-10-06 12:40:42 -07001873 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1874 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1875 struct drm_i915_gem_set_tiling set_tiling;
1876 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07001877
Chris Wilsonaba35022010-06-22 13:00:22 +01001878 if (bo_gem->global_name == 0 &&
1879 tiling_mode == bo_gem->tiling_mode &&
Chris Wilson056aa9b2010-06-21 14:31:29 +01001880 stride == bo_gem->stride)
Eric Anholtd70d6052009-10-06 12:40:42 -07001881 return 0;
Keith Packard18f091d2008-12-15 15:08:12 -08001882
Eric Anholtd70d6052009-10-06 12:40:42 -07001883 memset(&set_tiling, 0, sizeof(set_tiling));
Chris Wilson8ffd2e12009-12-01 13:08:04 +00001884 do {
Chris Wilson62997222010-09-25 21:32:59 +01001885 /* set_tiling is slightly broken and overwrites the
1886 * input on the error path, so we have to open code
	1887		 * drmIoctl.
1888 */
Chris Wilson1db22ff2010-06-21 14:27:23 +01001889 set_tiling.handle = bo_gem->gem_handle;
1890 set_tiling.tiling_mode = tiling_mode;
Chris Wilson4f0f8712010-02-10 09:45:13 +00001891 set_tiling.stride = stride;
1892
Chris Wilson8ffd2e12009-12-01 13:08:04 +00001893 ret = ioctl(bufmgr_gem->fd,
1894 DRM_IOCTL_I915_GEM_SET_TILING,
1895 &set_tiling);
Chris Wilson62997222010-09-25 21:32:59 +01001896 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
Chris Wilson1db22ff2010-06-21 14:27:23 +01001897 if (ret == -1)
1898 return -errno;
1899
1900 bo_gem->tiling_mode = set_tiling.tiling_mode;
1901 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
Chris Wilsonaba35022010-06-22 13:00:22 +01001902 bo_gem->stride = set_tiling.stride;
Chris Wilson1db22ff2010-06-21 14:27:23 +01001903 return 0;
1904}
1905
1906static int
1907drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1908 uint32_t stride)
1909{
1910 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1911 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1912 int ret;
1913
Chris Wilsoncd34cbe2010-06-22 11:07:26 +01001914 /* Linear buffers have no stride. By ensuring that we only ever use
1915 * stride 0 with linear buffers, we simplify our code.
1916 */
Chris Wilsonc7bbaca2010-06-22 11:15:56 +01001917 if (*tiling_mode == I915_TILING_NONE)
Chris Wilsoncd34cbe2010-06-22 11:07:26 +01001918 stride = 0;
1919
Chris Wilson1db22ff2010-06-21 14:27:23 +01001920 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
1921 if (ret == 0)
Chris Wilsonfcf3e612010-05-24 18:35:41 +01001922 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
Chris Wilsone22fb792009-11-30 22:14:30 +00001923
Keith Packard18f091d2008-12-15 15:08:12 -08001924 *tiling_mode = bo_gem->tiling_mode;
Chris Wilsonfcf3e612010-05-24 18:35:41 +01001925 return ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07001926}
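
/*
 * Illustrative sketch (not part of this file): requesting X tiling for a
 * surface.  On return the tiling mode holds what the kernel actually
 * granted, so the caller has to check it rather than assume the request
 * succeeded.  "pitch" and fall_back_to_linear() are hypothetical.
 *
 *	uint32_t tiling = I915_TILING_X;
 *
 *	drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *	if (tiling != I915_TILING_X)
 *		fall_back_to_linear();
 */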
1927
1928static int
Eric Anholtd70d6052009-10-06 12:40:42 -07001929drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1930 uint32_t * swizzle_mode)
Keith Packard8e41ce12008-08-04 00:34:08 -07001931{
Eric Anholtd70d6052009-10-06 12:40:42 -07001932 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt99338382008-10-14 13:18:11 -07001933
Eric Anholtd70d6052009-10-06 12:40:42 -07001934 *tiling_mode = bo_gem->tiling_mode;
1935 *swizzle_mode = bo_gem->swizzle_mode;
1936 return 0;
Eric Anholt99338382008-10-14 13:18:11 -07001937}
1938
1939static int
Eric Anholtd70d6052009-10-06 12:40:42 -07001940drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
Keith Packard8e41ce12008-08-04 00:34:08 -07001941{
Eric Anholtd70d6052009-10-06 12:40:42 -07001942 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1943 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1944 struct drm_gem_flink flink;
1945 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07001946
Eric Anholtd70d6052009-10-06 12:40:42 -07001947 if (!bo_gem->global_name) {
1948 memset(&flink, 0, sizeof(flink));
1949 flink.handle = bo_gem->gem_handle;
1950
Chris Wilson62997222010-09-25 21:32:59 +01001951 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
Eric Anholtd70d6052009-10-06 12:40:42 -07001952 if (ret != 0)
1953 return -errno;
1954 bo_gem->global_name = flink.name;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001955 bo_gem->reusable = false;
Chris Wilson36d49392011-02-14 09:39:06 +00001956
1957 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
Eric Anholtd70d6052009-10-06 12:40:42 -07001958 }
1959
1960 *name = bo_gem->global_name;
1961 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07001962}
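
/*
 * Illustrative sketch (not part of this file): exporting a buffer to another
 * process via its flink name (e.g. handing a pixmap to a compositor).  Note
 * that flinked objects are made non-reusable above.  send_name_to_client()
 * is a hypothetical transport.
 *
 *	uint32_t name;
 *
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_client(name);
 */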
1963
Eric Anholt6a9eb082008-06-03 09:27:37 -07001964/**
1965 * Enables unlimited caching of buffer objects for reuse.
1966 *
1967 * This is potentially very memory expensive, as the cache at each bucket
1968 * size is only bounded by how many buffers of that size we've managed to have
1969 * in flight at once.
1970 */
1971void
Eric Anholt4b982642008-10-30 09:33:07 -07001972drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001973{
Eric Anholtd70d6052009-10-06 12:40:42 -07001974 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001975
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001976 bufmgr_gem->bo_reuse = true;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001977}
1978
Eric Anholt0e867312008-10-21 00:10:54 -07001979/**
Jesse Barnesb5096402009-09-15 11:02:58 -07001980 * Enable use of fenced reloc type.
1981 *
1982 * New code should enable this to avoid unnecessary fence register
1983 * allocation. If this option is not enabled, all relocs will have fence
1984 * register allocated.
1985 */
1986void
1987drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1988{
Eric Anholt766fa792010-03-02 16:04:14 -08001989 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
Jesse Barnesb5096402009-09-15 11:02:58 -07001990
Eric Anholt766fa792010-03-02 16:04:14 -08001991 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001992 bufmgr_gem->fenced_relocs = true;
Jesse Barnesb5096402009-09-15 11:02:58 -07001993}
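
/*
 * Illustrative sketch (not part of this file): both knobs above are normally
 * flipped once, right after the buffer manager is created.  Note that the
 * fenced-reloc flag only takes effect when execbuffer2 is available.
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 */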
1994
1995/**
Eric Anholt0e867312008-10-21 00:10:54 -07001996 * Return the additional aperture space required by the tree of buffer objects
1997 * rooted at bo.
Eric Anholt6a9eb082008-06-03 09:27:37 -07001998 */
1999static int
Eric Anholt4b982642008-10-30 09:33:07 -07002000drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002001{
Eric Anholtd70d6052009-10-06 12:40:42 -07002002 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2003 int i;
2004 int total = 0;
Eric Anholt0e867312008-10-21 00:10:54 -07002005
Eric Anholtd70d6052009-10-06 12:40:42 -07002006 if (bo == NULL || bo_gem->included_in_check_aperture)
2007 return 0;
Eric Anholt0e867312008-10-21 00:10:54 -07002008
Eric Anholtd70d6052009-10-06 12:40:42 -07002009 total += bo->size;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002010 bo_gem->included_in_check_aperture = true;
Eric Anholt0e867312008-10-21 00:10:54 -07002011
Eric Anholtd70d6052009-10-06 12:40:42 -07002012 for (i = 0; i < bo_gem->reloc_count; i++)
2013 total +=
2014 drm_intel_gem_bo_get_aperture_space(bo_gem->
Jesse Barnesb5096402009-09-15 11:02:58 -07002015 reloc_target_info[i].bo);
Eric Anholt0e867312008-10-21 00:10:54 -07002016
Eric Anholtd70d6052009-10-06 12:40:42 -07002017 return total;
Eric Anholt0e867312008-10-21 00:10:54 -07002018}
2019
2020/**
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002021 * Count the number of buffers in this list that need a fence reg
2022 *
2023 * If the count is greater than the number of available regs, we'll have
2024 * to ask the caller to resubmit a batch with fewer tiled buffers.
2025 *
Eric Anholt9209c9a2009-01-27 16:54:11 -08002026 * This function over-counts if the same buffer is used multiple times.
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002027 */
2028static unsigned int
Eric Anholtd70d6052009-10-06 12:40:42 -07002029drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002030{
Eric Anholtd70d6052009-10-06 12:40:42 -07002031 int i;
2032 unsigned int total = 0;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002033
Eric Anholtd70d6052009-10-06 12:40:42 -07002034 for (i = 0; i < count; i++) {
2035 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002036
Eric Anholtd70d6052009-10-06 12:40:42 -07002037 if (bo_gem == NULL)
2038 continue;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002039
Eric Anholtd70d6052009-10-06 12:40:42 -07002040 total += bo_gem->reloc_tree_fences;
2041 }
2042 return total;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002043}
2044
2045/**
Eric Anholt4b982642008-10-30 09:33:07 -07002046 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2047 * for the next drm_intel_bufmgr_check_aperture_space() call.
Eric Anholt0e867312008-10-21 00:10:54 -07002048 */
2049static void
Eric Anholt4b982642008-10-30 09:33:07 -07002050drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
Eric Anholt0e867312008-10-21 00:10:54 -07002051{
Eric Anholtd70d6052009-10-06 12:40:42 -07002052 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2053 int i;
Eric Anholt0e867312008-10-21 00:10:54 -07002054
Eric Anholtd70d6052009-10-06 12:40:42 -07002055 if (bo == NULL || !bo_gem->included_in_check_aperture)
2056 return;
Eric Anholt0e867312008-10-21 00:10:54 -07002057
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002058 bo_gem->included_in_check_aperture = false;
Eric Anholt0e867312008-10-21 00:10:54 -07002059
Eric Anholtd70d6052009-10-06 12:40:42 -07002060 for (i = 0; i < bo_gem->reloc_count; i++)
2061 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
Jesse Barnesb5096402009-09-15 11:02:58 -07002062 reloc_target_info[i].bo);
Eric Anholt0e867312008-10-21 00:10:54 -07002063}
2064
2065/**
Keith Packardb13f4e12008-11-21 01:49:39 -08002066 * Return a conservative estimate for the amount of aperture required
2067 * for a collection of buffers. This may double-count some buffers.
2068 */
2069static unsigned int
2070drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2071{
Eric Anholtd70d6052009-10-06 12:40:42 -07002072 int i;
2073 unsigned int total = 0;
Keith Packardb13f4e12008-11-21 01:49:39 -08002074
Eric Anholtd70d6052009-10-06 12:40:42 -07002075 for (i = 0; i < count; i++) {
2076 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2077 if (bo_gem != NULL)
2078 total += bo_gem->reloc_tree_size;
2079 }
2080 return total;
Keith Packardb13f4e12008-11-21 01:49:39 -08002081}
2082
2083/**
2084 * Return the amount of aperture needed for a collection of buffers.
2085 * This avoids double counting any buffers, at the cost of looking
2086 * at every buffer in the set.
2087 */
2088static unsigned int
2089drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2090{
Eric Anholtd70d6052009-10-06 12:40:42 -07002091 int i;
2092 unsigned int total = 0;
Keith Packardb13f4e12008-11-21 01:49:39 -08002093
Eric Anholtd70d6052009-10-06 12:40:42 -07002094 for (i = 0; i < count; i++) {
2095 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2096 /* For the first buffer object in the array, we get an
2097 * accurate count back for its reloc_tree size (since nothing
2098 * had been flagged as being counted yet). We can save that
2099 * value out as a more conservative reloc_tree_size that
2100 * avoids double-counting target buffers. Since the first
2101 * buffer happens to usually be the batch buffer in our
2102 * callers, this can pull us back from doing the tree
2103 * walk on every new batch emit.
2104 */
2105 if (i == 0) {
2106 drm_intel_bo_gem *bo_gem =
2107 (drm_intel_bo_gem *) bo_array[i];
2108 bo_gem->reloc_tree_size = total;
2109 }
Eric Anholt7ce8d4c2009-02-27 13:46:31 -08002110 }
Keith Packardb13f4e12008-11-21 01:49:39 -08002111
Eric Anholtd70d6052009-10-06 12:40:42 -07002112 for (i = 0; i < count; i++)
2113 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2114 return total;
Keith Packardb13f4e12008-11-21 01:49:39 -08002115}
2116
2117/**
Eric Anholt0e867312008-10-21 00:10:54 -07002118 * Return -1 if the batchbuffer should be flushed before attempting to
2119 * emit rendering referencing the buffers pointed to by bo_array.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002120 *
Eric Anholt0e867312008-10-21 00:10:54 -07002121 * This is required because if we try to emit a batchbuffer with relocations
2122 * to a tree of buffers that won't simultaneously fit in the aperture,
2123 * the rendering will return an error at a point where the software is not
2124 * prepared to recover from it.
2125 *
2126 * However, we also want to emit the batchbuffer significantly before we reach
2127 * the limit, as a series of batchbuffers each of which references buffers
2128 * covering almost all of the aperture means that at each emit we end up
2129 * waiting to evict a buffer from the last rendering, and we get synchronous
2130 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2131 * get better parallelism.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002132 */
2133static int
Eric Anholt4b982642008-10-30 09:33:07 -07002134drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002135{
Eric Anholtd70d6052009-10-06 12:40:42 -07002136 drm_intel_bufmgr_gem *bufmgr_gem =
2137 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2138 unsigned int total = 0;
2139 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2140 int total_fences;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002141
Eric Anholtd70d6052009-10-06 12:40:42 -07002142 /* Check for fence reg constraints if necessary */
2143 if (bufmgr_gem->available_fences) {
2144 total_fences = drm_intel_gem_total_fences(bo_array, count);
2145 if (total_fences > bufmgr_gem->available_fences)
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002146 return -ENOSPC;
Eric Anholtd70d6052009-10-06 12:40:42 -07002147 }
Eric Anholt0e867312008-10-21 00:10:54 -07002148
Eric Anholtd70d6052009-10-06 12:40:42 -07002149 total = drm_intel_gem_estimate_batch_space(bo_array, count);
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002150
Eric Anholtd70d6052009-10-06 12:40:42 -07002151 if (total > threshold)
2152 total = drm_intel_gem_compute_batch_space(bo_array, count);
Eric Anholt0e867312008-10-21 00:10:54 -07002153
Eric Anholtd70d6052009-10-06 12:40:42 -07002154 if (total > threshold) {
2155 DBG("check_space: overflowed available aperture, "
2156 "%dkb vs %dkb\n",
2157 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002158 return -ENOSPC;
Eric Anholtd70d6052009-10-06 12:40:42 -07002159 } else {
2160 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2161 (int)bufmgr_gem->gtt_size / 1024);
2162 return 0;
2163 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07002164}
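
/*
 * Illustrative sketch (not part of this file): the flush-early policy the
 * comment above describes, as a caller would apply it before adding more
 * rendering to the current batch.  flush_batch() is a hypothetical function
 * that submits and resets the caller's batchbuffer.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, n_bos) != 0)
 *		flush_batch();
 */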
2165
Keith Packard5b5ce302009-05-11 13:42:12 -07002166/*
2167 * Disable buffer reuse for objects which are shared with the kernel
2168 * as scanout buffers
2169 */
2170static int
2171drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2172{
Eric Anholtd70d6052009-10-06 12:40:42 -07002173 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Keith Packard5b5ce302009-05-11 13:42:12 -07002174
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002175 bo_gem->reusable = false;
Eric Anholtd70d6052009-10-06 12:40:42 -07002176 return 0;
Keith Packard5b5ce302009-05-11 13:42:12 -07002177}
2178
Eric Anholt769b1052009-10-01 19:09:26 -07002179static int
Chris Wilson07e75892010-05-11 08:54:06 +01002180drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2181{
2182 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2183
2184 return bo_gem->reusable;
2185}
2186
2187static int
Eric Anholt66d27142009-10-20 13:20:55 -07002188_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
Eric Anholt769b1052009-10-01 19:09:26 -07002189{
Eric Anholtd70d6052009-10-06 12:40:42 -07002190 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2191 int i;
Eric Anholt769b1052009-10-01 19:09:26 -07002192
Eric Anholtd70d6052009-10-06 12:40:42 -07002193 for (i = 0; i < bo_gem->reloc_count; i++) {
Jesse Barnesb5096402009-09-15 11:02:58 -07002194 if (bo_gem->reloc_target_info[i].bo == target_bo)
Eric Anholtd70d6052009-10-06 12:40:42 -07002195 return 1;
Eric Anholt4f7704a2010-06-10 08:58:08 -07002196 if (bo == bo_gem->reloc_target_info[i].bo)
2197 continue;
Jesse Barnesb5096402009-09-15 11:02:58 -07002198 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
Eric Anholtd70d6052009-10-06 12:40:42 -07002199 target_bo))
2200 return 1;
2201 }
2202
Eric Anholt769b1052009-10-01 19:09:26 -07002203 return 0;
Eric Anholt769b1052009-10-01 19:09:26 -07002204}
2205
Eric Anholt66d27142009-10-20 13:20:55 -07002206/** Return true if target_bo is referenced by bo's relocation tree. */
2207static int
2208drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2209{
2210 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2211
2212 if (bo == NULL || target_bo == NULL)
2213 return 0;
2214 if (target_bo_gem->used_as_reloc_target)
2215 return _drm_intel_gem_bo_references(bo, target_bo);
2216 return 0;
2217}
2218
Eric Anholt0ec768e2010-06-04 17:09:11 -07002219static void
2220add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2221{
2222 unsigned int i = bufmgr_gem->num_buckets;
2223
2224 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2225
2226 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2227 bufmgr_gem->cache_bucket[i].size = size;
2228 bufmgr_gem->num_buckets++;
2229}
2230
2231static void
2232init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2233{
2234 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2235
2236 /* OK, so power of two buckets was too wasteful of memory.
2237 * Give 3 other sizes between each power of two, to hopefully
2238 * cover things accurately enough. (The alternative is
2239 * probably to just go for exact matching of sizes, and assume
2240 * that for things like composited window resize the tiled
2241 * width/height alignment and rounding of sizes to pages will
2242 * get us useful cache hit rates anyway)
2243 */
2244 add_bucket(bufmgr_gem, 4096);
2245 add_bucket(bufmgr_gem, 4096 * 2);
2246 add_bucket(bufmgr_gem, 4096 * 3);
2247
2248 /* Initialize the linked lists for BO reuse cache. */
2249 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2250 add_bucket(bufmgr_gem, size);
2251
2252 add_bucket(bufmgr_gem, size + size * 1 / 4);
2253 add_bucket(bufmgr_gem, size + size * 2 / 4);
2254 add_bucket(bufmgr_gem, size + size * 3 / 4);
2255 }
2256}
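
/*
 * For reference, the buckets created above run 4kB, 8kB, 12kB, 16kB, 20kB,
 * 24kB, 28kB, 32kB, 40kB, 48kB, 56kB, 64kB and so on: each power of two plus
 * three evenly spaced intermediate sizes, with the final loop iteration
 * starting at the 64MB cache_max_size.
 */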
2257
Chris Wilsone4b60f22011-12-05 21:29:05 +00002258void
2259drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2260{
2261 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2262
2263 bufmgr_gem->vma_max = limit;
2264
2265 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2266}
2267
Eric Anholt769b1052009-10-01 19:09:26 -07002268/**
Eric Anholt6a9eb082008-06-03 09:27:37 -07002269 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
	2270 * and manage buffer objects.
2271 *
2272 * \param fd File descriptor of the opened DRM device.
2273 */
Eric Anholt4b982642008-10-30 09:33:07 -07002274drm_intel_bufmgr *
2275drm_intel_bufmgr_gem_init(int fd, int batch_size)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002276{
Eric Anholtd70d6052009-10-06 12:40:42 -07002277 drm_intel_bufmgr_gem *bufmgr_gem;
2278 struct drm_i915_gem_get_aperture aperture;
2279 drm_i915_getparam_t gp;
Daniel Vetter630dd262011-09-22 22:20:09 +02002280 int ret, tmp;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002281 bool exec2 = false;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002282
Eric Anholtd70d6052009-10-06 12:40:42 -07002283 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
Dave Airlie973d8d62010-02-02 10:57:12 +10002284 if (bufmgr_gem == NULL)
2285 return NULL;
2286
Eric Anholtd70d6052009-10-06 12:40:42 -07002287 bufmgr_gem->fd = fd;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002288
Eric Anholtd70d6052009-10-06 12:40:42 -07002289 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
2290 free(bufmgr_gem);
2291 return NULL;
2292 }
Eric Anholt6df7b072008-06-12 23:22:26 -07002293
Chris Wilson62997222010-09-25 21:32:59 +01002294 ret = drmIoctl(bufmgr_gem->fd,
2295 DRM_IOCTL_I915_GEM_GET_APERTURE,
2296 &aperture);
Eric Anholt0e867312008-10-21 00:10:54 -07002297
Eric Anholtd70d6052009-10-06 12:40:42 -07002298 if (ret == 0)
2299 bufmgr_gem->gtt_size = aperture.aper_available_size;
2300 else {
	2301		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
2302 strerror(errno));
2303 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2304 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2305 "May lead to reduced performance or incorrect "
2306 "rendering.\n",
2307 (int)bufmgr_gem->gtt_size / 1024);
2308 }
Eric Anholt0e867312008-10-21 00:10:54 -07002309
Eric Anholtd70d6052009-10-06 12:40:42 -07002310 gp.param = I915_PARAM_CHIPSET_ID;
2311 gp.value = &bufmgr_gem->pci_device;
Chris Wilson62997222010-09-25 21:32:59 +01002312 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Eric Anholtcbdd6272009-01-27 17:16:11 -08002313 if (ret) {
Eric Anholtd70d6052009-10-06 12:40:42 -07002314 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2315 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
Eric Anholtcbdd6272009-01-27 17:16:11 -08002316 }
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002317
Eric Anholt078bc5b2011-12-20 13:10:36 -08002318 if (IS_GEN2(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002319 bufmgr_gem->gen = 2;
Eric Anholt078bc5b2011-12-20 13:10:36 -08002320 else if (IS_GEN3(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002321 bufmgr_gem->gen = 3;
Eric Anholt078bc5b2011-12-20 13:10:36 -08002322 else if (IS_GEN4(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002323 bufmgr_gem->gen = 4;
Chad Versace592ac672012-01-27 10:02:16 -08002324 else if (IS_GEN5(bufmgr_gem->pci_device))
2325 bufmgr_gem->gen = 5;
2326 else if (IS_GEN6(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002327 bufmgr_gem->gen = 6;
Chad Versace592ac672012-01-27 10:02:16 -08002328 else if (IS_GEN7(bufmgr_gem->pci_device))
2329 bufmgr_gem->gen = 7;
2330 else
2331 assert(0);
Eric Anholta1f9ea72010-03-02 08:49:36 -08002332
Eric Anholt078bc5b2011-12-20 13:10:36 -08002333 if (IS_GEN3(bufmgr_gem->pci_device) &&
2334 bufmgr_gem->gtt_size > 256*1024*1024) {
Daniel Vetter36cff1c2011-12-04 12:51:45 +01002335 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
2336 * be used for tiled blits. To simplify the accounting, just
	2337	 * subtract the unmappable part (fixed to 256MB on all known
2338 * gen3 devices) if the kernel advertises it. */
2339 bufmgr_gem->gtt_size -= 256*1024*1024;
2340 }
2341
Daniel Vetter630dd262011-09-22 22:20:09 +02002342 gp.value = &tmp;
2343
Jesse Barnesb5096402009-09-15 11:02:58 -07002344 gp.param = I915_PARAM_HAS_EXECBUF2;
Chris Wilson62997222010-09-25 21:32:59 +01002345 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Jesse Barnesb5096402009-09-15 11:02:58 -07002346 if (!ret)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002347 exec2 = true;
Jesse Barnesb5096402009-09-15 11:02:58 -07002348
Zou Nan hai66375fd2010-06-02 10:07:37 +08002349 gp.param = I915_PARAM_HAS_BSD;
Chris Wilson62997222010-09-25 21:32:59 +01002350 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Chris Wilson057fab32010-10-26 11:35:11 +01002351 bufmgr_gem->has_bsd = ret == 0;
2352
2353 gp.param = I915_PARAM_HAS_BLT;
2354 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2355 bufmgr_gem->has_blt = ret == 0;
Zou Nan hai66375fd2010-06-02 10:07:37 +08002356
Chris Wilson36245772010-10-29 10:49:54 +01002357 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
2358 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2359 bufmgr_gem->has_relaxed_fencing = ret == 0;
2360
Eric Anholta1f9ea72010-03-02 08:49:36 -08002361 if (bufmgr_gem->gen < 4) {
Eric Anholtd70d6052009-10-06 12:40:42 -07002362 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2363 gp.value = &bufmgr_gem->available_fences;
Chris Wilson62997222010-09-25 21:32:59 +01002364 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Eric Anholtd70d6052009-10-06 12:40:42 -07002365 if (ret) {
2366 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2367 errno);
2368 fprintf(stderr, "param: %d, val: %d\n", gp.param,
2369 *gp.value);
2370 bufmgr_gem->available_fences = 0;
Chris Wilsonfdcde592010-02-09 08:32:54 +00002371 } else {
2372 /* XXX The kernel reports the total number of fences,
2373 * including any that may be pinned.
2374 *
2375 * We presume that there will be at least one pinned
2376 * fence for the scanout buffer, but there may be more
2377 * than one scanout and the user may be manually
2378 * pinning buffers. Let's move to execbuffer2 and
2379 * thereby forget the insanity of using fences...
2380 */
2381 bufmgr_gem->available_fences -= 2;
2382 if (bufmgr_gem->available_fences < 0)
2383 bufmgr_gem->available_fences = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07002384 }
2385 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07002386
Eric Anholtd70d6052009-10-06 12:40:42 -07002387 /* Let's go with one relocation per every 2 dwords (but round down a bit
2388 * since a power of two will mean an extra page allocation for the reloc
2389 * buffer).
2390 *
2391 * Every 4 was too few for the blender benchmark.
2392 */
2393 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
Eric Anholt769b1052009-10-01 19:09:26 -07002394
Eric Anholtd70d6052009-10-06 12:40:42 -07002395 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2396 bufmgr_gem->bufmgr.bo_alloc_for_render =
2397 drm_intel_gem_bo_alloc_for_render;
Jesse Barnes3a7dfcd2009-10-06 14:34:06 -07002398 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
Eric Anholtd70d6052009-10-06 12:40:42 -07002399 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2400 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2401 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2402 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2403 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2404 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2405 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2406 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
Jesse Barnesb5096402009-09-15 11:02:58 -07002407 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
Eric Anholtd70d6052009-10-06 12:40:42 -07002408 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2409 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2410 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2411 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2412 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
Jesse Barnesb5096402009-09-15 11:02:58 -07002413 /* Use the new one if available */
Zou Nan hai66375fd2010-06-02 10:07:37 +08002414 if (exec2) {
Jesse Barnesb5096402009-09-15 11:02:58 -07002415 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
Albert Damen49447a92010-11-07 15:54:32 +01002416 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
Zou Nan hai66375fd2010-06-02 10:07:37 +08002417 } else
Jesse Barnesb5096402009-09-15 11:02:58 -07002418 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
Eric Anholtd70d6052009-10-06 12:40:42 -07002419 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
Chris Wilson83a35b62009-11-11 13:04:38 +00002420 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
Eric Anholtd70d6052009-10-06 12:40:42 -07002421 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2422 bufmgr_gem->bufmgr.debug = 0;
2423 bufmgr_gem->bufmgr.check_aperture_space =
2424 drm_intel_gem_check_aperture_space;
2425 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
Chris Wilson07e75892010-05-11 08:54:06 +01002426 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
Eric Anholtd70d6052009-10-06 12:40:42 -07002427 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2428 drm_intel_gem_get_pipe_from_crtc_id;
2429 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002430
Chris Wilson36d49392011-02-14 09:39:06 +00002431 DRMINITLISTHEAD(&bufmgr_gem->named);
Eric Anholt0ec768e2010-06-04 17:09:11 -07002432 init_cache_buckets(bufmgr_gem);
Eric Anholtd70d6052009-10-06 12:40:42 -07002433
Chris Wilsone4b60f22011-12-05 21:29:05 +00002434 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
2435 bufmgr_gem->vma_max = -1; /* unlimited by default */
2436
Eric Anholtd70d6052009-10-06 12:40:42 -07002437 return &bufmgr_gem->bufmgr;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002438}
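
/*
 * Illustrative sketch (not part of this file): typical initialization from a
 * driver.  The fd comes from opening the DRM device; the 16kB batch size is
 * an assumption for the example and only feeds the max_relocs sizing above.
 *
 *	drm_intel_bufmgr *bufmgr;
 *
 *	bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	if (bufmgr == NULL)
 *		return -1;
 */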