/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "intel_aub.h"
#include "string.h"

#include "i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

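/* Zero ioctl argument structs when running under Valgrind so that padding
 * and unused fields passed to the kernel are not reported as uninitialized.
 */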
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead named;
	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	bool fenced_relocs;

	FILE *aub_file;
	uint32_t aub_offset;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;
	drmMMListHead name_list;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Flags that we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;

	uint32_t aub_offset;

	drm_intel_aub_annotation *aub_annotations;
	unsigned aub_annotation_count;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						       time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

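	/* Round up to the next power of two, no smaller than min_size. */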
	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

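/* Same as drm_intel_add_validate_buffer(), but for the execbuffer2 path:
 * it fills in exec2_objects[] and can mark the entry with
 * EXEC_OBJECT_NEEDS_FENCE when the relocation needs a fence register.
 */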
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1) {
		if (need_fence)
			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
				EXEC_OBJECT_NEEDS_FENCE;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	if (need_fence) {
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	}
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		size = 2 * min_size;
	}

	bo_gem->reloc_tree_size = size;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	VG_CLEAR(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	return (ret == 0 && busy.busy);
}

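/* Issue the I915_GEM_MADVISE ioctl to mark a buffer's backing pages as
 * discardable (I915_MADV_DONTNEED) or needed again (I915_MADV_WILLNEED);
 * returns whether the backing store is still retained by the kernel.
 */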
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	VG_CLEAR(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;

		VG_CLEAR(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}

		DRMINITLISTHEAD(&bo_gem->name_list);
		DRMINITLISTHEAD(&bo_gem->vma_list);
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;
	bo_gem->aub_annotations = NULL;
	bo_gem->aub_annotation_count = 0;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault. This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride);
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bos.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->global_name == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	VG_CLEAR(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	VG_CLEAR(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	/* Close this object */
	VG_CLEAR(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo_gem->aub_annotations);
	free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}

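/* Called when a buffer's last active mapping is dropped: park its mmaps on
 * the VMA cache, account for them in vma_count, and trim the cache back
 * under vma_max.
 */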
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	DRMLISTDEL(&bo_gem->name_list);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						       time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		VG_CLEAR(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	VG_CLEAR(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

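/* Internal helper that creates (or reuses) the GTT mmapping of a buffer
 * without changing its GEM domain; callers such as drm_intel_gem_bo_map_gtt()
 * issue the SET_DOMAIN ioctl themselves.
 */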
Eric Anholt99c73372012-02-10 04:12:15 -08001199static int
1200map_gtt(drm_intel_bo *bo)
Eric Anholtd70d6052009-10-06 12:40:42 -07001201{
1202 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1203 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholtd70d6052009-10-06 12:40:42 -07001204 int ret;
1205
Chris Wilsone4b60f22011-12-05 21:29:05 +00001206 if (bo_gem->map_count++ == 0)
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001207 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001208
Eric Anholtd70d6052009-10-06 12:40:42 -07001209 /* Get a mapping of the buffer if we haven't before. */
1210 if (bo_gem->gtt_virtual == NULL) {
1211 struct drm_i915_gem_mmap_gtt mmap_arg;
1212
Chris Wilson015286f2011-12-11 17:35:06 +00001213 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1214 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
Eric Anholtd70d6052009-10-06 12:40:42 -07001215
Chris Wilson90b23cc2012-02-09 10:23:10 +00001216 VG_CLEAR(mmap_arg);
Eric Anholtd70d6052009-10-06 12:40:42 -07001217 mmap_arg.handle = bo_gem->gem_handle;
1218
1219 /* Get the fake offset back... */
Chris Wilson62997222010-09-25 21:32:59 +01001220 ret = drmIoctl(bufmgr_gem->fd,
1221 DRM_IOCTL_I915_GEM_MMAP_GTT,
1222 &mmap_arg);
Eric Anholtd70d6052009-10-06 12:40:42 -07001223 if (ret != 0) {
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001224 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001225 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1226 __FILE__, __LINE__,
1227 bo_gem->gem_handle, bo_gem->name,
1228 strerror(errno));
Chris Wilsonc5f0ed12011-12-13 10:30:54 +00001229 if (--bo_gem->map_count == 0)
1230 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Eric Anholtd70d6052009-10-06 12:40:42 -07001231 return ret;
1232 }
1233
1234 /* and mmap it */
1235 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1236 MAP_SHARED, bufmgr_gem->fd,
1237 mmap_arg.offset);
1238 if (bo_gem->gtt_virtual == MAP_FAILED) {
Chris Wilson08371bc2009-12-08 22:35:24 +00001239 bo_gem->gtt_virtual = NULL;
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001240 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001241 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1242 __FILE__, __LINE__,
1243 bo_gem->gem_handle, bo_gem->name,
1244 strerror(errno));
Chris Wilsone4b60f22011-12-05 21:29:05 +00001245 if (--bo_gem->map_count == 0)
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001246 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001247 return ret;
Eric Anholtd70d6052009-10-06 12:40:42 -07001248 }
1249 }
1250
1251 bo->virtual = bo_gem->gtt_virtual;
1252
1253 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1254 bo_gem->gtt_virtual);
1255
Eric Anholt99c73372012-02-10 04:12:15 -08001256 return 0;
1257}
1258
1259int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1260{
1261 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1262 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1263 struct drm_i915_gem_set_domain set_domain;
1264 int ret;
1265
1266 pthread_mutex_lock(&bufmgr_gem->lock);
1267
1268 ret = map_gtt(bo);
1269 if (ret) {
1270 pthread_mutex_unlock(&bufmgr_gem->lock);
1271 return ret;
1272 }
1273
1274 /* Now move it to the GTT domain so that the GPU and CPU
1275 * caches are flushed and the GPU isn't actively using the
1276 * buffer.
1277 *
1278 * The pagefault handler does this domain change for us when
1279 * it has unbound the BO from the GTT, but it's up to us to
 1280	 * tell it when we're about to use the buffer again if we have done
1281 * rendering and it still happens to be bound to the GTT.
1282 */
Chris Wilson90b23cc2012-02-09 10:23:10 +00001283 VG_CLEAR(set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001284 set_domain.handle = bo_gem->gem_handle;
1285 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1286 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
Chris Wilson62997222010-09-25 21:32:59 +01001287 ret = drmIoctl(bufmgr_gem->fd,
1288 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1289 &set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001290 if (ret != 0) {
Chris Wilson96214862010-10-01 16:50:09 +01001291 DBG("%s:%d: Error setting domain %d: %s\n",
1292 __FILE__, __LINE__, bo_gem->gem_handle,
1293 strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001294 }
1295
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001296 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1297 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
Chris Wilsona3305b02010-05-13 08:24:28 +01001298 pthread_mutex_unlock(&bufmgr_gem->lock);
1299
Chris Wilsonc3ddfea2010-06-29 20:12:44 +01001300 return 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07001301}
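/*
 * Illustrative usage sketch (editor's addition): drm_intel_gem_bo_map_gtt()
 * is the exported entry point, so a caller that wants write-combined access
 * through the aperture (for example to a tiled scanout buffer) might do:
 *
 *	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
 *		memcpy(bo->virtual, data, bo->size);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 *
 * "bo" and "data" are assumed to exist in the caller.  Unlike the CPU map
 * above, accesses go through the aperture, so the buffer's tiling layout is
 * handled for us.
 */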
1302
Eric Anholt99c73372012-02-10 04:12:15 -08001303/**
1304 * Performs a mapping of the buffer object like the normal GTT
1305 * mapping, but avoids waiting for the GPU to be done reading from or
1306 * rendering to the buffer.
1307 *
1308 * This is used in the implementation of GL_ARB_map_buffer_range: The
1309 * user asks to create a buffer, then does a mapping, fills some
1310 * space, runs a drawing command, then asks to map it again without
1311 * synchronizing because it guarantees that it won't write over the
1312 * data that the GPU is busy using (or, more specifically, that if it
1313 * does write over the data, it acknowledges that rendering is
1314 * undefined).
1315 */
1316
1317int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1318{
1319 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1320 int ret;
1321
1322 /* If the CPU cache isn't coherent with the GTT, then use a
1323 * regular synchronized mapping. The problem is that we don't
1324 * track where the buffer was last used on the CPU side in
1325 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1326 * we would potentially corrupt the buffer even when the user
1327 * does reasonable things.
1328 */
1329 if (!bufmgr_gem->has_llc)
1330 return drm_intel_gem_bo_map_gtt(bo);
1331
1332 pthread_mutex_lock(&bufmgr_gem->lock);
1333 ret = map_gtt(bo);
1334 pthread_mutex_unlock(&bufmgr_gem->lock);
1335
1336 return ret;
1337}
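/*
 * Illustrative usage sketch (editor's addition): the pattern described in
 * the comment above, as a GL_ARB_map_buffer_range-style caller might use it
 * ("bo", "vertices", "size" and "append_offset" are assumed to exist in the
 * caller):
 *
 *	drm_intel_gem_bo_map_unsynchronized(bo);
 *	memcpy((char *)bo->virtual + append_offset, vertices, size);
 *	drm_intel_gem_bo_unmap_gtt(bo);
 *
 * The caller promises it only writes ranges the GPU is not reading, which
 * is why no wait or set-domain is issued on the has_llc path.
 */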
1338
Eric Anholtd70d6052009-10-06 12:40:42 -07001339static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1340{
1341 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1342 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001343 int ret = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07001344
1345 if (bo == NULL)
1346 return 0;
1347
Chris Wilsona3305b02010-05-13 08:24:28 +01001348 pthread_mutex_lock(&bufmgr_gem->lock);
1349
Chris Wilson015286f2011-12-11 17:35:06 +00001350 if (bo_gem->map_count <= 0) {
1351 DBG("attempted to unmap an unmapped bo\n");
1352 pthread_mutex_unlock(&bufmgr_gem->lock);
1353 /* Preserve the old behaviour of just treating this as a
1354 * no-op rather than reporting the error.
1355 */
1356 return 0;
1357 }
Chris Wilsone4b60f22011-12-05 21:29:05 +00001358
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001359 if (bo_gem->mapped_cpu_write) {
Chris Wilson90b23cc2012-02-09 10:23:10 +00001360 struct drm_i915_gem_sw_finish sw_finish;
1361
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001362 /* Cause a flush to happen if the buffer's pinned for
1363 * scanout, so the results show up in a timely manner.
1364 * Unlike GTT set domains, this only does work if the
 1365		 * buffer is scanout-related.
1366 */
Chris Wilson90b23cc2012-02-09 10:23:10 +00001367 VG_CLEAR(sw_finish);
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001368 sw_finish.handle = bo_gem->gem_handle;
1369 ret = drmIoctl(bufmgr_gem->fd,
1370 DRM_IOCTL_I915_GEM_SW_FINISH,
1371 &sw_finish);
1372 ret = ret == -1 ? -errno : 0;
1373
1374 bo_gem->mapped_cpu_write = false;
1375 }
Eric Anholtd70d6052009-10-06 12:40:42 -07001376
Chris Wilsonc549a772011-12-05 10:14:34 +00001377	/* We need to unmap after every invocation as we cannot track
 1378	 * an open vma for every bo as that will exhaust the system
1379 * limits and cause later failures.
1380 */
1381 if (--bo_gem->map_count == 0) {
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001382 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001383 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
Chris Wilsonc549a772011-12-05 10:14:34 +00001384 bo->virtual = NULL;
1385 }
Chris Wilsona3305b02010-05-13 08:24:28 +01001386 pthread_mutex_unlock(&bufmgr_gem->lock);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001387
1388 return ret;
Carl Worthafd245d2009-04-29 14:43:55 -07001389}
1390
Eric Anholtd0ae6832011-10-28 13:13:08 -07001391int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1392{
1393 return drm_intel_gem_bo_unmap(bo);
1394}
1395
Eric Anholt6a9eb082008-06-03 09:27:37 -07001396static int
Eric Anholtd70d6052009-10-06 12:40:42 -07001397drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1398 unsigned long size, const void *data)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001399{
Eric Anholtd70d6052009-10-06 12:40:42 -07001400 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1401 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1402 struct drm_i915_gem_pwrite pwrite;
1403 int ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001404
Chris Wilson90b23cc2012-02-09 10:23:10 +00001405 VG_CLEAR(pwrite);
Eric Anholtd70d6052009-10-06 12:40:42 -07001406 pwrite.handle = bo_gem->gem_handle;
1407 pwrite.offset = offset;
1408 pwrite.size = size;
1409 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
Chris Wilson62997222010-09-25 21:32:59 +01001410 ret = drmIoctl(bufmgr_gem->fd,
1411 DRM_IOCTL_I915_GEM_PWRITE,
1412 &pwrite);
Eric Anholtd70d6052009-10-06 12:40:42 -07001413 if (ret != 0) {
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001414 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001415 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1416 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1417 (int)size, strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001418 }
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001419
1420 return ret;
Eric Anholtd70d6052009-10-06 12:40:42 -07001421}
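/*
 * Illustrative usage sketch (editor's addition): this backs the
 * drm_intel_bo_subdata() wrapper, which is handy for small uploads where a
 * full map/unmap cycle would be overkill ("bo" is assumed to exist in the
 * caller):
 *
 *	float constants[4] = { 0.0f, 0.5f, 1.0f, 2.0f };
 *	drm_intel_bo_subdata(bo, 0, sizeof(constants), constants);
 *
 * The data is copied by the kernel via pwrite, so no mapping is left around
 * afterwards.
 */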
1422
1423static int
1424drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1425{
1426 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1427 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1428 int ret;
1429
Chris Wilson90b23cc2012-02-09 10:23:10 +00001430 VG_CLEAR(get_pipe_from_crtc_id);
Eric Anholtd70d6052009-10-06 12:40:42 -07001431 get_pipe_from_crtc_id.crtc_id = crtc_id;
Chris Wilson62997222010-09-25 21:32:59 +01001432 ret = drmIoctl(bufmgr_gem->fd,
1433 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1434 &get_pipe_from_crtc_id);
Eric Anholtd70d6052009-10-06 12:40:42 -07001435 if (ret != 0) {
1436 /* We return -1 here to signal that we don't
1437 * know which pipe is associated with this crtc.
1438 * This lets the caller know that this information
1439 * isn't available; using the wrong pipe for
 1440		 * vblank waiting can cause the chipset to lock up.
1441 */
1442 return -1;
1443 }
1444
1445 return get_pipe_from_crtc_id.pipe;
1446}
1447
1448static int
1449drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1450 unsigned long size, void *data)
1451{
1452 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1453 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1454 struct drm_i915_gem_pread pread;
1455 int ret;
1456
Chris Wilson90b23cc2012-02-09 10:23:10 +00001457 VG_CLEAR(pread);
Eric Anholtd70d6052009-10-06 12:40:42 -07001458 pread.handle = bo_gem->gem_handle;
1459 pread.offset = offset;
1460 pread.size = size;
1461 pread.data_ptr = (uint64_t) (uintptr_t) data;
Chris Wilson62997222010-09-25 21:32:59 +01001462 ret = drmIoctl(bufmgr_gem->fd,
1463 DRM_IOCTL_I915_GEM_PREAD,
1464 &pread);
Eric Anholtd70d6052009-10-06 12:40:42 -07001465 if (ret != 0) {
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001466 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001467 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1468 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1469 (int)size, strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001470 }
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001471
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001472 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001473}
1474
Eric Anholt877b2ce2010-11-09 13:51:45 -08001475/** Waits for all GPU rendering with the object to have completed. */
Eric Anholt6a9eb082008-06-03 09:27:37 -07001476static void
Eric Anholt4b982642008-10-30 09:33:07 -07001477drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001478{
Eric Anholt877b2ce2010-11-09 13:51:45 -08001479 drm_intel_gem_bo_start_gtt_access(bo, 1);
Eric Anholt6fb1ad72008-11-13 11:44:22 -08001480}
1481
1482/**
Ben Widawsky971c0802012-06-05 11:30:48 -07001483 * Waits on a BO for the given amount of time.
1484 *
1485 * @bo: buffer object to wait for
1486 * @timeout_ns: amount of time to wait in nanoseconds.
1487 * If value is less than 0, an infinite wait will occur.
1488 *
 1489 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1490 * object has completed within the allotted time. Otherwise some negative return
1491 * value describes the error. Of particular interest is -ETIME when the wait has
1492 * failed to yield the desired result.
1493 *
1494 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1495 * the operation to give up after a certain amount of time. Another subtle
1496 * difference is the internal locking semantics are different (this variant does
1497 * not hold the lock for the duration of the wait). This makes the wait subject
1498 * to a larger userspace race window.
1499 *
1500 * The implementation shall wait until the object is no longer actively
 1501 * referenced within a batch buffer at the time of the call. The wait does not
 1502 * guarantee that the buffer will not be re-issued via another thread or via a
 1503 * flinked handle. Userspace must make sure this race does not occur if such precision
1504 * is important.
1505 */
1506int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1507{
1508 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1509 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1510 struct drm_i915_gem_wait wait;
1511 int ret;
1512
1513 if (!bufmgr_gem->has_wait_timeout) {
1514 DBG("%s:%d: Timed wait is not supported. Falling back to "
1515 "infinite wait\n", __FILE__, __LINE__);
1516 if (timeout_ns) {
1517 drm_intel_gem_bo_wait_rendering(bo);
1518 return 0;
1519 } else {
1520 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1521 }
1522 }
1523
1524 wait.bo_handle = bo_gem->gem_handle;
1525 wait.timeout_ns = timeout_ns;
1526 wait.flags = 0;
1527 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1528 if (ret == -1)
1529 return -errno;
1530
1531 return ret;
1532}
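/*
 * Illustrative usage sketch (editor's addition): a caller that wants to
 * bound how long it blocks on a busy buffer might use the timed wait like
 * this ("bo" is assumed to exist in the caller):
 *
 *	int ret = drm_intel_gem_bo_wait(bo, 1000 * 1000);	(1 ms)
 *	if (ret == -ETIME)
 *		;	(still busy: fall back, drop a frame, etc.)
 *	else if (ret < 0)
 *		;	(some other error)
 *
 * Passing a negative timeout_ns waits forever, matching
 * drm_intel_gem_bo_wait_rendering() above.
 */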
1533
1534/**
Eric Anholt6fb1ad72008-11-13 11:44:22 -08001535 * Sets the object to the GTT read and possibly write domain, used by the X
1536 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1537 *
1538 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1539 * can do tiled pixmaps this way.
1540 */
1541void
1542drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1543{
Eric Anholtd70d6052009-10-06 12:40:42 -07001544 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1545 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1546 struct drm_i915_gem_set_domain set_domain;
1547 int ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001548
Chris Wilson90b23cc2012-02-09 10:23:10 +00001549 VG_CLEAR(set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001550 set_domain.handle = bo_gem->gem_handle;
1551 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1552 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
Chris Wilson62997222010-09-25 21:32:59 +01001553 ret = drmIoctl(bufmgr_gem->fd,
1554 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1555 &set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001556 if (ret != 0) {
Chris Wilson96214862010-10-01 16:50:09 +01001557 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1558 __FILE__, __LINE__, bo_gem->gem_handle,
1559 set_domain.read_domains, set_domain.write_domain,
1560 strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001561 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001562}
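/*
 * Illustrative usage sketch (editor's addition): the 2D-driver pattern
 * described above is roughly ("bo" is assumed to be a pinned, tiled pixmap
 * owned by the caller):
 *
 *	drm_intel_gem_bo_start_gtt_access(bo, 1);	(1 = will write)
 *	... CPU reads/writes through a GTT mapping of the pixmap ...
 *
 * The call only flushes caches and waits as needed; any error from the
 * set-domain ioctl is logged and ignored, which is why it returns void.
 */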
1563
1564static void
Eric Anholt4b982642008-10-30 09:33:07 -07001565drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001566{
Eric Anholtd70d6052009-10-06 12:40:42 -07001567 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1568 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001569
Jesse Barnesb5096402009-09-15 11:02:58 -07001570 free(bufmgr_gem->exec2_objects);
Eric Anholtd70d6052009-10-06 12:40:42 -07001571 free(bufmgr_gem->exec_objects);
1572 free(bufmgr_gem->exec_bos);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001573
Eric Anholtd70d6052009-10-06 12:40:42 -07001574 pthread_mutex_destroy(&bufmgr_gem->lock);
Eric Anholt6df7b072008-06-12 23:22:26 -07001575
Eric Anholtd70d6052009-10-06 12:40:42 -07001576 /* Free any cached buffer objects we were going to reuse */
Eric Anholt0ec768e2010-06-04 17:09:11 -07001577 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
Eric Anholtd70d6052009-10-06 12:40:42 -07001578 struct drm_intel_gem_bo_bucket *bucket =
1579 &bufmgr_gem->cache_bucket[i];
1580 drm_intel_bo_gem *bo_gem;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001581
Eric Anholtd70d6052009-10-06 12:40:42 -07001582 while (!DRMLISTEMPTY(&bucket->head)) {
1583 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1584 bucket->head.next, head);
1585 DRMLISTDEL(&bo_gem->head);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001586
Eric Anholtd70d6052009-10-06 12:40:42 -07001587 drm_intel_gem_bo_free(&bo_gem->bo);
1588 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001589 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001590
Eric Anholtd70d6052009-10-06 12:40:42 -07001591 free(bufmgr);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001592}
1593
1594/**
1595 * Adds the target buffer to the validation list and adds the relocation
1596 * to the reloc_buffer's relocation list.
1597 *
1598 * The relocation entry at the given offset must already contain the
1599 * precomputed relocation value, because the kernel will optimize out
1600 * the relocation entry write when the buffer hasn't moved from the
1601 * last known offset in target_bo.
1602 */
1603static int
Jesse Barnesb5096402009-09-15 11:02:58 -07001604do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1605 drm_intel_bo *target_bo, uint32_t target_offset,
1606 uint32_t read_domains, uint32_t write_domain,
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001607 bool need_fence)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001608{
Eric Anholtd70d6052009-10-06 12:40:42 -07001609 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1610 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1611 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001612 bool fenced_command;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001613
Chris Wilson97077332009-12-01 23:01:34 +00001614 if (bo_gem->has_error)
Chris Wilson792fed12009-12-02 13:12:39 +00001615 return -ENOMEM;
Chris Wilson792fed12009-12-02 13:12:39 +00001616
1617 if (target_bo_gem->has_error) {
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001618 bo_gem->has_error = true;
Chris Wilson792fed12009-12-02 13:12:39 +00001619 return -ENOMEM;
1620 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001621
Jesse Barnesb5096402009-09-15 11:02:58 -07001622 /* We never use HW fences for rendering on 965+ */
Eric Anholta1f9ea72010-03-02 08:49:36 -08001623 if (bufmgr_gem->gen >= 4)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001624 need_fence = false;
Jesse Barnesb5096402009-09-15 11:02:58 -07001625
Chris Wilson537703f2010-12-07 20:34:22 +00001626 fenced_command = need_fence;
1627 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001628 need_fence = false;
Chris Wilson537703f2010-12-07 20:34:22 +00001629
Eric Anholtd70d6052009-10-06 12:40:42 -07001630 /* Create a new relocation list if needed */
Chris Wilson97077332009-12-01 23:01:34 +00001631 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
Chris Wilson792fed12009-12-02 13:12:39 +00001632 return -ENOMEM;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001633
Eric Anholtd70d6052009-10-06 12:40:42 -07001634 /* Check overflow */
1635 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001636
Eric Anholtd70d6052009-10-06 12:40:42 -07001637 /* Check args */
1638 assert(offset <= bo->size - 4);
1639 assert((write_domain & (write_domain - 1)) == 0);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001640
Eric Anholtd70d6052009-10-06 12:40:42 -07001641 /* Make sure that we're not adding a reloc to something whose size has
1642 * already been accounted for.
1643 */
1644 assert(!bo_gem->used_as_reloc_target);
Eric Anholtf1791372010-06-07 14:22:36 -07001645 if (target_bo_gem != bo_gem) {
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001646 target_bo_gem->used_as_reloc_target = true;
Eric Anholtf1791372010-06-07 14:22:36 -07001647 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1648 }
Eric Anholta1f9ea72010-03-02 08:49:36 -08001649 /* An object needing a fence is a tiled buffer, so it won't have
Jesse Barnesb5096402009-09-15 11:02:58 -07001650 * relocs to other buffers.
1651 */
1652 if (need_fence)
1653 target_bo_gem->reloc_tree_fences = 1;
Eric Anholtd70d6052009-10-06 12:40:42 -07001654 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
Eric Anholt0e867312008-10-21 00:10:54 -07001655
Eric Anholtd70d6052009-10-06 12:40:42 -07001656 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1657 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1658 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1659 target_bo_gem->gem_handle;
1660 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1661 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1662 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001663
Jesse Barnesb5096402009-09-15 11:02:58 -07001664 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
Eric Anholt4f7704a2010-06-10 08:58:08 -07001665 if (target_bo != bo)
1666 drm_intel_gem_bo_reference(target_bo);
Chris Wilsonaf3d2822010-12-03 10:48:12 +00001667 if (fenced_command)
Jesse Barnesb5096402009-09-15 11:02:58 -07001668 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1669 DRM_INTEL_RELOC_FENCE;
1670 else
1671 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001672
Eric Anholtd70d6052009-10-06 12:40:42 -07001673 bo_gem->reloc_count++;
Eric Anholt6df7b072008-06-12 23:22:26 -07001674
Eric Anholtd70d6052009-10-06 12:40:42 -07001675 return 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001676}
1677
Jesse Barnesb5096402009-09-15 11:02:58 -07001678static int
1679drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1680 drm_intel_bo *target_bo, uint32_t target_offset,
1681 uint32_t read_domains, uint32_t write_domain)
1682{
1683 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1684
1685 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1686 read_domains, write_domain,
1687 !bufmgr_gem->fenced_relocs);
1688}
1689
1690static int
1691drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1692 drm_intel_bo *target_bo,
1693 uint32_t target_offset,
1694 uint32_t read_domains, uint32_t write_domain)
1695{
1696 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001697 read_domains, write_domain, true);
Jesse Barnesb5096402009-09-15 11:02:58 -07001698}
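/*
 * Illustrative usage sketch (editor's addition): when building a batch, the
 * caller writes the presumed address into the batch itself and then records
 * the relocation through the drm_intel_bo_emit_reloc() wrapper, e.g.
 * ("batch_bo", "batch_map", "dword_index", "target_bo" and "target_delta"
 * are assumed to exist in the caller):
 *
 *	batch_map[dword_index] = target_bo->offset + target_delta;
 *	drm_intel_bo_emit_reloc(batch_bo, dword_index * 4,
 *				target_bo, target_delta,
 *				I915_GEM_DOMAIN_RENDER, 0);
 *
 * As the comment before do_bo_emit_reloc() notes, the precomputed value in
 * the batch lets the kernel skip rewriting it when target_bo has not moved.
 */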
1699
Eric Anholt515cea62011-10-21 18:48:20 -07001700int
1701drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1702{
1703 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1704
1705 return bo_gem->reloc_count;
1706}
1707
1708/**
1709 * Removes existing relocation entries in the BO after "start".
1710 *
 1711 * This allows a user to avoid a two-step process for state setup:
1712 * counting up all the buffer objects and doing a
1713 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1714 * relocations for the state setup. Instead, save the state of the
 1715 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
1716 * state, and then check if it still fits in the aperture.
1717 *
1718 * Any further drm_intel_bufmgr_check_aperture_space() queries
1719 * involving this buffer in the tree are undefined after this call.
1720 */
1721void
1722drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1723{
1724 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1725 int i;
1726 struct timespec time;
1727
1728 clock_gettime(CLOCK_MONOTONIC, &time);
1729
1730 assert(bo_gem->reloc_count >= start);
1731 /* Unreference the cleared target buffers */
1732 for (i = start; i < bo_gem->reloc_count; i++) {
1733 if (bo_gem->reloc_target_info[i].bo != bo) {
1734 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1735 reloc_target_info[i].bo,
1736 time.tv_sec);
1737 }
1738 }
1739 bo_gem->reloc_count = start;
1740}
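/*
 * Illustrative usage sketch (editor's addition): the single-pass state setup
 * described above looks roughly like this ("batch_bo" and the
 * emit_misc_state() helper are assumed/hypothetical):
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_misc_state(batch_bo);	(adds relocs to batch_bo)
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 *		(flush the batch, then emit the state again)
 *	}
 */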
1741
Eric Anholt6a9eb082008-06-03 09:27:37 -07001742/**
1743 * Walk the tree of relocations rooted at BO and accumulate the list of
1744 * validations to be performed and update the relocation buffers with
1745 * index values into the validation list.
1746 */
1747static void
Eric Anholt4b982642008-10-30 09:33:07 -07001748drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001749{
Eric Anholtd70d6052009-10-06 12:40:42 -07001750 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1751 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001752
Eric Anholtd70d6052009-10-06 12:40:42 -07001753 if (bo_gem->relocs == NULL)
1754 return;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001755
Eric Anholtd70d6052009-10-06 12:40:42 -07001756 for (i = 0; i < bo_gem->reloc_count; i++) {
Jesse Barnesb5096402009-09-15 11:02:58 -07001757 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001758
Eric Anholtf1791372010-06-07 14:22:36 -07001759 if (target_bo == bo)
1760 continue;
1761
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001762 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1763
Eric Anholtd70d6052009-10-06 12:40:42 -07001764 /* Continue walking the tree depth-first. */
1765 drm_intel_gem_bo_process_reloc(target_bo);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001766
Eric Anholtd70d6052009-10-06 12:40:42 -07001767 /* Add the target to the validate list */
1768 drm_intel_add_validate_buffer(target_bo);
1769 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001770}
1771
Eric Anholt6a9eb082008-06-03 09:27:37 -07001772static void
Jesse Barnesb5096402009-09-15 11:02:58 -07001773drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1774{
1775 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1776 int i;
1777
1778 if (bo_gem->relocs == NULL)
1779 return;
1780
1781 for (i = 0; i < bo_gem->reloc_count; i++) {
1782 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1783 int need_fence;
1784
Eric Anholtf1791372010-06-07 14:22:36 -07001785 if (target_bo == bo)
1786 continue;
1787
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001788 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1789
Jesse Barnesb5096402009-09-15 11:02:58 -07001790 /* Continue walking the tree depth-first. */
1791 drm_intel_gem_bo_process_reloc2(target_bo);
1792
1793 need_fence = (bo_gem->reloc_target_info[i].flags &
1794 DRM_INTEL_RELOC_FENCE);
1795
1796 /* Add the target to the validate list */
1797 drm_intel_add_validate_buffer2(target_bo, need_fence);
1798 }
1799}
1800
1801
1802static void
Eric Anholtd70d6052009-10-06 12:40:42 -07001803drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001804{
Eric Anholtd70d6052009-10-06 12:40:42 -07001805 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001806
Eric Anholtd70d6052009-10-06 12:40:42 -07001807 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1808 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1809 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001810
Eric Anholtd70d6052009-10-06 12:40:42 -07001811 /* Update the buffer offset */
1812 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1813 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1814 bo_gem->gem_handle, bo_gem->name, bo->offset,
1815 (unsigned long long)bufmgr_gem->exec_objects[i].
1816 offset);
1817 bo->offset = bufmgr_gem->exec_objects[i].offset;
1818 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001819 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001820}
1821
Jesse Barnesb5096402009-09-15 11:02:58 -07001822static void
1823drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1824{
1825 int i;
1826
1827 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1828 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1829 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1830
1831 /* Update the buffer offset */
1832 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1833 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1834 bo_gem->gem_handle, bo_gem->name, bo->offset,
1835 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1836 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1837 }
1838 }
1839}
1840
Eric Anholt4db16a92011-10-11 15:59:03 -07001841static void
1842aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1843{
1844 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1845}
1846
1847static void
1848aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1849{
1850 fwrite(data, 1, size, bufmgr_gem->aub_file);
1851}
1852
1853static void
1854aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1855{
1856 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1857 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1858 uint32_t *data;
1859 unsigned int i;
1860
1861 data = malloc(bo->size);
1862 drm_intel_bo_get_subdata(bo, offset, size, data);
1863
1864 /* Easy mode: write out bo with no relocations */
1865 if (!bo_gem->reloc_count) {
1866 aub_out_data(bufmgr_gem, data, size);
1867 free(data);
1868 return;
1869 }
1870
1871 /* Otherwise, handle the relocations while writing. */
1872 for (i = 0; i < size / 4; i++) {
1873 int r;
1874 for (r = 0; r < bo_gem->reloc_count; r++) {
1875 struct drm_i915_gem_relocation_entry *reloc;
1876 drm_intel_reloc_target *info;
1877
1878 reloc = &bo_gem->relocs[r];
1879 info = &bo_gem->reloc_target_info[r];
1880
1881 if (reloc->offset == offset + i * 4) {
1882 drm_intel_bo_gem *target_gem;
1883 uint32_t val;
1884
1885 target_gem = (drm_intel_bo_gem *)info->bo;
1886
1887 val = reloc->delta;
1888 val += target_gem->aub_offset;
1889
1890 aub_out(bufmgr_gem, val);
1891 data[i] = val;
1892 break;
1893 }
1894 }
1895 if (r == bo_gem->reloc_count) {
1896 /* no relocation, just the data */
1897 aub_out(bufmgr_gem, data[i]);
1898 }
1899 }
1900
1901 free(data);
1902}
1903
1904static void
1905aub_bo_get_address(drm_intel_bo *bo)
1906{
1907 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1908 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1909
1910 /* Give the object a graphics address in the AUB file. We
1911 * don't just use the GEM object address because we do AUB
1912 * dumping before execution -- we want to successfully log
 1913	 * when the hardware might hang, and we might even want to do AUB
1914 * capture for a driver trying to execute on a different
1915 * generation of hardware by disabling the actual kernel exec
1916 * call.
1917 */
1918 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1919 bufmgr_gem->aub_offset += bo->size;
1920 /* XXX: Handle aperture overflow. */
1921 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1922}
1923
1924static void
1925aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1926 uint32_t offset, uint32_t size)
1927{
1928 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1929 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1930
1931 aub_out(bufmgr_gem,
1932 CMD_AUB_TRACE_HEADER_BLOCK |
1933 (5 - 2));
1934 aub_out(bufmgr_gem,
1935 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1936 aub_out(bufmgr_gem, subtype);
1937 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1938 aub_out(bufmgr_gem, size);
1939 aub_write_bo_data(bo, offset, size);
1940}
1941
Paul Berryda02f722012-05-04 12:41:00 -07001942/**
1943 * Break up large objects into multiple writes. Otherwise a 128kb VBO
 1944 * would overflow the 16-bit size field in the packet header and
1945 * everything goes badly after that.
1946 */
Eric Anholt4db16a92011-10-11 15:59:03 -07001947static void
Paul Berryda02f722012-05-04 12:41:00 -07001948aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1949 uint32_t offset, uint32_t size)
Eric Anholt4db16a92011-10-11 15:59:03 -07001950{
1951 uint32_t block_size;
Paul Berryda02f722012-05-04 12:41:00 -07001952 uint32_t sub_offset;
Eric Anholt4db16a92011-10-11 15:59:03 -07001953
Paul Berryda02f722012-05-04 12:41:00 -07001954 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
1955 block_size = size - sub_offset;
Eric Anholt4db16a92011-10-11 15:59:03 -07001956
1957 if (block_size > 8 * 4096)
1958 block_size = 8 * 4096;
1959
Paul Berryda02f722012-05-04 12:41:00 -07001960 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
1961 block_size);
1962 }
1963}
1964
1965static void
1966aub_write_bo(drm_intel_bo *bo)
1967{
1968 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1969 uint32_t offset = 0;
1970 unsigned i;
1971
1972 aub_bo_get_address(bo);
1973
1974 /* Write out each annotated section separately. */
1975 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
1976 drm_intel_aub_annotation *annotation =
1977 &bo_gem->aub_annotations[i];
1978 uint32_t ending_offset = annotation->ending_offset;
1979 if (ending_offset > bo->size)
1980 ending_offset = bo->size;
1981 if (ending_offset > offset) {
1982 aub_write_large_trace_block(bo, annotation->type,
1983 annotation->subtype,
1984 offset,
1985 ending_offset - offset);
1986 offset = ending_offset;
1987 }
1988 }
1989
1990 /* Write out any remaining unannotated data */
1991 if (offset < bo->size) {
1992 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1993 offset, bo->size - offset);
Eric Anholt4db16a92011-10-11 15:59:03 -07001994 }
1995}
1996
1997/*
1998 * Make a ringbuffer on fly and dump it
1999 */
2000static void
2001aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2002 uint32_t batch_buffer, int ring_flag)
2003{
2004 uint32_t ringbuffer[4096];
2005 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2006 int ring_count = 0;
2007
2008 if (ring_flag == I915_EXEC_BSD)
2009 ring = AUB_TRACE_TYPE_RING_PRB1;
2010
2011 /* Make a ring buffer to execute our batchbuffer. */
2012 memset(ringbuffer, 0, sizeof(ringbuffer));
2013 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2014 ringbuffer[ring_count++] = batch_buffer;
2015
2016 /* Write out the ring. This appears to trigger execution of
2017 * the ring in the simulator.
2018 */
2019 aub_out(bufmgr_gem,
2020 CMD_AUB_TRACE_HEADER_BLOCK |
2021 (5 - 2));
2022 aub_out(bufmgr_gem,
2023 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2024 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2025 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2026 aub_out(bufmgr_gem, ring_count * 4);
2027
2028 /* FIXME: Need some flush operations here? */
2029 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2030
2031 /* Update offset pointer */
2032 bufmgr_gem->aub_offset += 4096;
2033}
2034
2035void
2036drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2037 int x1, int y1, int width, int height,
2038 enum aub_dump_bmp_format format,
2039 int pitch, int offset)
2040{
2041 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2042 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2043 uint32_t cpp;
2044
2045 switch (format) {
2046 case AUB_DUMP_BMP_FORMAT_8BIT:
2047 cpp = 1;
2048 break;
2049 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2050 cpp = 2;
2051 break;
2052 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2053 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2054 cpp = 4;
2055 break;
2056 default:
2057 printf("Unknown AUB dump format %d\n", format);
2058 return;
2059 }
2060
2061 if (!bufmgr_gem->aub_file)
2062 return;
2063
2064 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2065 aub_out(bufmgr_gem, (y1 << 16) | x1);
2066 aub_out(bufmgr_gem,
2067 (format << 24) |
2068 (cpp << 19) |
2069 pitch / 4);
2070 aub_out(bufmgr_gem, (height << 16) | width);
2071 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2072 aub_out(bufmgr_gem,
2073 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2074 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2075}
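/*
 * Illustrative usage sketch (editor's addition): AUB dumping has to be
 * enabled on the bufmgr first (drm_intel_bufmgr_gem_set_aub_dump() is
 * assumed to be the switch for that); a driver could then log the current
 * color buffer after a batch:
 *
 *	drm_intel_gem_bo_aub_dump_bmp(color_bo, 0, 0, width, height,
 *				      AUB_DUMP_BMP_FORMAT_ARGB_8888,
 *				      pitch, 0);
 *
 * "color_bo", "width", "height" and "pitch" are assumed to exist in the
 * caller; the function silently returns if no AUB file is open.
 */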
2076
2077static void
2078aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2079{
2080 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2081 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2082 int i;
Paul Berryda02f722012-05-04 12:41:00 -07002083 bool batch_buffer_needs_annotations;
Eric Anholt4db16a92011-10-11 15:59:03 -07002084
2085 if (!bufmgr_gem->aub_file)
2086 return;
2087
Paul Berryda02f722012-05-04 12:41:00 -07002088	/* If the batch buffer is not annotated, annotate it as best we
2089 * can.
2090 */
2091 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2092 if (batch_buffer_needs_annotations) {
2093 drm_intel_aub_annotation annotations[2] = {
2094 { AUB_TRACE_TYPE_BATCH, 0, used },
2095 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2096 };
2097 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
Eric Anholt4db16a92011-10-11 15:59:03 -07002098 }
2099
Paul Berryda02f722012-05-04 12:41:00 -07002100 /* Write out all buffers to AUB memory */
2101 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2102 aub_write_bo(bufmgr_gem->exec_bos[i]);
2103 }
Eric Anholt4db16a92011-10-11 15:59:03 -07002104
Paul Berryda02f722012-05-04 12:41:00 -07002105 /* Remove any annotations we added */
2106 if (batch_buffer_needs_annotations)
2107 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
Eric Anholt4db16a92011-10-11 15:59:03 -07002108
2109 /* Dump ring buffer */
2110 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2111
2112 fflush(bufmgr_gem->aub_file);
2113
2114 /*
2115 * One frame has been dumped. So reset the aub_offset for the next frame.
2116 *
2117 * FIXME: Can we do this?
2118 */
2119 bufmgr_gem->aub_offset = 0x10000;
2120}
2121
Eric Anholtf9d98be2008-09-08 08:51:40 -07002122static int
Eric Anholt4b982642008-10-30 09:33:07 -07002123drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
Eric Anholtd70d6052009-10-06 12:40:42 -07002124 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002125{
Eric Anholtd70d6052009-10-06 12:40:42 -07002126 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
Chris Wilson792fed12009-12-02 13:12:39 +00002127 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholtd70d6052009-10-06 12:40:42 -07002128 struct drm_i915_gem_execbuffer execbuf;
2129 int ret, i;
Eric Anholtf9d98be2008-09-08 08:51:40 -07002130
Chris Wilson792fed12009-12-02 13:12:39 +00002131 if (bo_gem->has_error)
2132 return -ENOMEM;
2133
Eric Anholtd70d6052009-10-06 12:40:42 -07002134 pthread_mutex_lock(&bufmgr_gem->lock);
2135 /* Update indices and set up the validate list. */
2136 drm_intel_gem_bo_process_reloc(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07002137
Eric Anholtd70d6052009-10-06 12:40:42 -07002138 /* Add the batch buffer to the validation list. There are no
2139 * relocations pointing to it.
2140 */
2141 drm_intel_add_validate_buffer(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07002142
Chris Wilson90b23cc2012-02-09 10:23:10 +00002143 VG_CLEAR(execbuf);
Eric Anholtd70d6052009-10-06 12:40:42 -07002144 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2145 execbuf.buffer_count = bufmgr_gem->exec_count;
2146 execbuf.batch_start_offset = 0;
2147 execbuf.batch_len = used;
2148 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2149 execbuf.num_cliprects = num_cliprects;
2150 execbuf.DR1 = 0;
2151 execbuf.DR4 = DR4;
Eric Anholtf9d98be2008-09-08 08:51:40 -07002152
Chris Wilson62997222010-09-25 21:32:59 +01002153 ret = drmIoctl(bufmgr_gem->fd,
2154 DRM_IOCTL_I915_GEM_EXECBUFFER,
2155 &execbuf);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002156 if (ret != 0) {
2157 ret = -errno;
2158 if (errno == ENOSPC) {
Chris Wilson96214862010-10-01 16:50:09 +01002159 DBG("Execbuffer fails to pin. "
2160 "Estimate: %u. Actual: %u. Available: %u\n",
2161 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2162 bufmgr_gem->
2163 exec_count),
2164 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2165 bufmgr_gem->
2166 exec_count),
2167 (unsigned int)bufmgr_gem->gtt_size);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002168 }
Eric Anholtd70d6052009-10-06 12:40:42 -07002169 }
2170 drm_intel_update_buffer_offsets(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07002171
Eric Anholtd70d6052009-10-06 12:40:42 -07002172 if (bufmgr_gem->bufmgr.debug)
2173 drm_intel_gem_dump_validation_list(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07002174
Eric Anholtd70d6052009-10-06 12:40:42 -07002175 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2176 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2177 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002178
Eric Anholtd70d6052009-10-06 12:40:42 -07002179 /* Disconnect the buffer from the validate list */
2180 bo_gem->validate_index = -1;
Eric Anholtd70d6052009-10-06 12:40:42 -07002181 bufmgr_gem->exec_bos[i] = NULL;
2182 }
2183 bufmgr_gem->exec_count = 0;
2184 pthread_mutex_unlock(&bufmgr_gem->lock);
Eric Anholtf9d98be2008-09-08 08:51:40 -07002185
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002186 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002187}
2188
Keith Packard8e41ce12008-08-04 00:34:08 -07002189static int
Ben Widawsky3ed38712012-03-18 18:28:28 -07002190do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2191 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2192 unsigned int flags)
Jesse Barnesb5096402009-09-15 11:02:58 -07002193{
2194 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2195 struct drm_i915_gem_execbuffer2 execbuf;
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002196 int ret = 0;
2197 int i;
Jesse Barnesb5096402009-09-15 11:02:58 -07002198
Chris Wilson0184bb12010-12-19 13:01:15 +00002199 switch (flags & 0x7) {
Chris Wilson057fab32010-10-26 11:35:11 +01002200 default:
Zou Nan hai66375fd2010-06-02 10:07:37 +08002201 return -EINVAL;
Chris Wilson057fab32010-10-26 11:35:11 +01002202 case I915_EXEC_BLT:
2203 if (!bufmgr_gem->has_blt)
2204 return -EINVAL;
2205 break;
2206 case I915_EXEC_BSD:
2207 if (!bufmgr_gem->has_bsd)
2208 return -EINVAL;
2209 break;
2210 case I915_EXEC_RENDER:
2211 case I915_EXEC_DEFAULT:
2212 break;
2213 }
Zou Nan hai66375fd2010-06-02 10:07:37 +08002214
Jesse Barnesb5096402009-09-15 11:02:58 -07002215 pthread_mutex_lock(&bufmgr_gem->lock);
2216 /* Update indices and set up the validate list. */
2217 drm_intel_gem_bo_process_reloc2(bo);
2218
2219 /* Add the batch buffer to the validation list. There are no relocations
2220 * pointing to it.
2221 */
2222 drm_intel_add_validate_buffer2(bo, 0);
2223
Chris Wilson90b23cc2012-02-09 10:23:10 +00002224 VG_CLEAR(execbuf);
Jesse Barnesb5096402009-09-15 11:02:58 -07002225 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2226 execbuf.buffer_count = bufmgr_gem->exec_count;
2227 execbuf.batch_start_offset = 0;
2228 execbuf.batch_len = used;
2229 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2230 execbuf.num_cliprects = num_cliprects;
2231 execbuf.DR1 = 0;
2232 execbuf.DR4 = DR4;
Chris Wilson0184bb12010-12-19 13:01:15 +00002233 execbuf.flags = flags;
Ben Widawsky3ed38712012-03-18 18:28:28 -07002234 if (ctx == NULL)
2235 i915_execbuffer2_set_context_id(execbuf, 0);
2236 else
2237 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
Jesse Barnesb5096402009-09-15 11:02:58 -07002238 execbuf.rsvd2 = 0;
2239
Eric Anholt4db16a92011-10-11 15:59:03 -07002240 aub_exec(bo, flags, used);
2241
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002242 if (bufmgr_gem->no_exec)
2243 goto skip_execution;
2244
Chris Wilson62997222010-09-25 21:32:59 +01002245 ret = drmIoctl(bufmgr_gem->fd,
2246 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2247 &execbuf);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00002248 if (ret != 0) {
2249 ret = -errno;
Chris Wilson13e82702010-06-21 15:38:06 +01002250 if (ret == -ENOSPC) {
Chris Wilson96214862010-10-01 16:50:09 +01002251 DBG("Execbuffer fails to pin. "
2252 "Estimate: %u. Actual: %u. Available: %u\n",
2253 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2254 bufmgr_gem->exec_count),
2255 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2256 bufmgr_gem->exec_count),
2257 (unsigned int) bufmgr_gem->gtt_size);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00002258 }
Jesse Barnesb5096402009-09-15 11:02:58 -07002259 }
2260 drm_intel_update_buffer_offsets2(bufmgr_gem);
2261
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002262skip_execution:
Jesse Barnesb5096402009-09-15 11:02:58 -07002263 if (bufmgr_gem->bufmgr.debug)
2264 drm_intel_gem_dump_validation_list(bufmgr_gem);
2265
2266 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2267 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2268 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2269
2270 /* Disconnect the buffer from the validate list */
2271 bo_gem->validate_index = -1;
2272 bufmgr_gem->exec_bos[i] = NULL;
2273 }
2274 bufmgr_gem->exec_count = 0;
2275 pthread_mutex_unlock(&bufmgr_gem->lock);
2276
Chris Wilson3e21e3b2010-03-04 21:17:48 +00002277 return ret;
Jesse Barnesb5096402009-09-15 11:02:58 -07002278}
2279
2280static int
Zou Nan hai66375fd2010-06-02 10:07:37 +08002281drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2282 drm_clip_rect_t *cliprects, int num_cliprects,
2283 int DR4)
2284{
Ben Widawsky3ed38712012-03-18 18:28:28 -07002285 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2286 I915_EXEC_RENDER);
2287}
2288
2289static int
2290drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2291 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2292 unsigned int flags)
2293{
2294 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2295 flags);
2296}
2297
2298int
2299drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2300 int used, unsigned int flags)
2301{
2302 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
Zou Nan hai66375fd2010-06-02 10:07:37 +08002303}
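/*
 * Illustrative usage sketch (editor's addition): executing a batch in a
 * hardware context, assuming drm_intel_gem_context_create() from this
 * library is available ("bufmgr", "batch_bo" and "used_bytes" are assumed
 * to exist in the caller):
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx)
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
 *					      I915_EXEC_RENDER);
 *
 * Passing a NULL context falls back to context id 0, as do_exec2() shows.
 */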
2304
2305static int
Eric Anholt4b982642008-10-30 09:33:07 -07002306drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
Keith Packard8e41ce12008-08-04 00:34:08 -07002307{
Eric Anholtd70d6052009-10-06 12:40:42 -07002308 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2309 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2310 struct drm_i915_gem_pin pin;
2311 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002312
Chris Wilson90b23cc2012-02-09 10:23:10 +00002313 VG_CLEAR(pin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002314 pin.handle = bo_gem->gem_handle;
2315 pin.alignment = alignment;
Keith Packard8e41ce12008-08-04 00:34:08 -07002316
Chris Wilson62997222010-09-25 21:32:59 +01002317 ret = drmIoctl(bufmgr_gem->fd,
2318 DRM_IOCTL_I915_GEM_PIN,
2319 &pin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002320 if (ret != 0)
2321 return -errno;
Keith Packard8e41ce12008-08-04 00:34:08 -07002322
Eric Anholtd70d6052009-10-06 12:40:42 -07002323 bo->offset = pin.offset;
2324 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07002325}
2326
2327static int
Eric Anholt4b982642008-10-30 09:33:07 -07002328drm_intel_gem_bo_unpin(drm_intel_bo *bo)
Keith Packard8e41ce12008-08-04 00:34:08 -07002329{
Eric Anholtd70d6052009-10-06 12:40:42 -07002330 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2331 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2332 struct drm_i915_gem_unpin unpin;
2333 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002334
Chris Wilson90b23cc2012-02-09 10:23:10 +00002335 VG_CLEAR(unpin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002336 unpin.handle = bo_gem->gem_handle;
Keith Packard8e41ce12008-08-04 00:34:08 -07002337
Chris Wilson62997222010-09-25 21:32:59 +01002338 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002339 if (ret != 0)
2340 return -errno;
Keith Packard8e41ce12008-08-04 00:34:08 -07002341
Eric Anholtd70d6052009-10-06 12:40:42 -07002342 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07002343}
2344
2345static int
Chris Wilson1db22ff2010-06-21 14:27:23 +01002346drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2347 uint32_t tiling_mode,
2348 uint32_t stride)
Keith Packard8e41ce12008-08-04 00:34:08 -07002349{
Eric Anholtd70d6052009-10-06 12:40:42 -07002350 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2351 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2352 struct drm_i915_gem_set_tiling set_tiling;
2353 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002354
Chris Wilsonaba35022010-06-22 13:00:22 +01002355 if (bo_gem->global_name == 0 &&
2356 tiling_mode == bo_gem->tiling_mode &&
Chris Wilson056aa9b2010-06-21 14:31:29 +01002357 stride == bo_gem->stride)
Eric Anholtd70d6052009-10-06 12:40:42 -07002358 return 0;
Keith Packard18f091d2008-12-15 15:08:12 -08002359
Eric Anholtd70d6052009-10-06 12:40:42 -07002360 memset(&set_tiling, 0, sizeof(set_tiling));
Chris Wilson8ffd2e12009-12-01 13:08:04 +00002361 do {
Chris Wilson62997222010-09-25 21:32:59 +01002362 /* set_tiling is slightly broken and overwrites the
2363 * input on the error path, so we have to open code
 2364		 * drmIoctl.
2365 */
Chris Wilson1db22ff2010-06-21 14:27:23 +01002366 set_tiling.handle = bo_gem->gem_handle;
2367 set_tiling.tiling_mode = tiling_mode;
Chris Wilson4f0f8712010-02-10 09:45:13 +00002368 set_tiling.stride = stride;
2369
Chris Wilson8ffd2e12009-12-01 13:08:04 +00002370 ret = ioctl(bufmgr_gem->fd,
2371 DRM_IOCTL_I915_GEM_SET_TILING,
2372 &set_tiling);
Chris Wilson62997222010-09-25 21:32:59 +01002373 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
Chris Wilson1db22ff2010-06-21 14:27:23 +01002374 if (ret == -1)
2375 return -errno;
2376
2377 bo_gem->tiling_mode = set_tiling.tiling_mode;
2378 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
Chris Wilsonaba35022010-06-22 13:00:22 +01002379 bo_gem->stride = set_tiling.stride;
Chris Wilson1db22ff2010-06-21 14:27:23 +01002380 return 0;
2381}
2382
2383static int
2384drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2385 uint32_t stride)
2386{
2387 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2388 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2389 int ret;
2390
Chris Wilsoncd34cbe2010-06-22 11:07:26 +01002391 /* Linear buffers have no stride. By ensuring that we only ever use
2392 * stride 0 with linear buffers, we simplify our code.
2393 */
Chris Wilsonc7bbaca2010-06-22 11:15:56 +01002394 if (*tiling_mode == I915_TILING_NONE)
Chris Wilsoncd34cbe2010-06-22 11:07:26 +01002395 stride = 0;
2396
Chris Wilson1db22ff2010-06-21 14:27:23 +01002397 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2398 if (ret == 0)
Chris Wilsonfcf3e612010-05-24 18:35:41 +01002399 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
Chris Wilsone22fb792009-11-30 22:14:30 +00002400
Keith Packard18f091d2008-12-15 15:08:12 -08002401 *tiling_mode = bo_gem->tiling_mode;
Chris Wilsonfcf3e612010-05-24 18:35:41 +01002402 return ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002403}
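/*
 * Illustrative usage sketch (editor's addition): callers reach this through
 * the drm_intel_bo_set_tiling() wrapper; note that tiling_mode is in/out,
 * since the kernel may refuse the request ("bo" and "pitch" are assumed to
 * exist in the caller):
 *
 *	uint32_t tiling = I915_TILING_X;
 *	drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *	if (tiling != I915_TILING_X)
 *		;	(kernel could not tile the buffer; treat as linear)
 */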
2404
2405static int
Eric Anholtd70d6052009-10-06 12:40:42 -07002406drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2407 uint32_t * swizzle_mode)
Keith Packard8e41ce12008-08-04 00:34:08 -07002408{
Eric Anholtd70d6052009-10-06 12:40:42 -07002409 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt99338382008-10-14 13:18:11 -07002410
Eric Anholtd70d6052009-10-06 12:40:42 -07002411 *tiling_mode = bo_gem->tiling_mode;
2412 *swizzle_mode = bo_gem->swizzle_mode;
2413 return 0;
Eric Anholt99338382008-10-14 13:18:11 -07002414}
2415
2416static int
Eric Anholtd70d6052009-10-06 12:40:42 -07002417drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
Keith Packard8e41ce12008-08-04 00:34:08 -07002418{
Eric Anholtd70d6052009-10-06 12:40:42 -07002419 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2420 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholtd70d6052009-10-06 12:40:42 -07002421 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002422
Eric Anholtd70d6052009-10-06 12:40:42 -07002423 if (!bo_gem->global_name) {
Chris Wilson90b23cc2012-02-09 10:23:10 +00002424 struct drm_gem_flink flink;
2425
2426 VG_CLEAR(flink);
Eric Anholtd70d6052009-10-06 12:40:42 -07002427 flink.handle = bo_gem->gem_handle;
2428
Chris Wilson62997222010-09-25 21:32:59 +01002429 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
Eric Anholtd70d6052009-10-06 12:40:42 -07002430 if (ret != 0)
2431 return -errno;
Chris Wilson90b23cc2012-02-09 10:23:10 +00002432
Eric Anholtd70d6052009-10-06 12:40:42 -07002433 bo_gem->global_name = flink.name;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002434 bo_gem->reusable = false;
Chris Wilson36d49392011-02-14 09:39:06 +00002435
2436 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
Eric Anholtd70d6052009-10-06 12:40:42 -07002437 }
2438
2439 *name = bo_gem->global_name;
2440 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07002441}
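/*
 * Illustrative usage sketch (editor's addition): flink names are how buffers
 * are shared across processes (e.g. DRI2); the wrapper is
 * drm_intel_bo_flink(), and the receiving side typically reopens the buffer
 * by name ("bufmgr", "bo" and the transport helper are assumed/hypothetical):
 *
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_client(name);	(hypothetical transport)
 *
 *	(in the other process)
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *
 * Note that flinked buffers are marked non-reusable above, since another
 * process may still hold a reference when we would otherwise recycle them.
 */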
2442
Eric Anholt6a9eb082008-06-03 09:27:37 -07002443/**
2444 * Enables unlimited caching of buffer objects for reuse.
2445 *
2446 * This is potentially very memory expensive, as the cache at each bucket
2447 * size is only bounded by how many buffers of that size we've managed to have
2448 * in flight at once.
2449 */
2450void
Eric Anholt4b982642008-10-30 09:33:07 -07002451drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002452{
Eric Anholtd70d6052009-10-06 12:40:42 -07002453 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002454
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002455 bufmgr_gem->bo_reuse = true;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002456}
2457
Eric Anholt0e867312008-10-21 00:10:54 -07002458/**
Jesse Barnesb5096402009-09-15 11:02:58 -07002459 * Enable use of fenced reloc type.
2460 *
2461 * New code should enable this to avoid unnecessary fence register
 2462 * allocation. If this option is not enabled, all relocs will have a fence
2463 * register allocated.
2464 */
2465void
2466drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2467{
Eric Anholt766fa792010-03-02 16:04:14 -08002468 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
Jesse Barnesb5096402009-09-15 11:02:58 -07002469
Eric Anholt766fa792010-03-02 16:04:14 -08002470 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002471 bufmgr_gem->fenced_relocs = true;
Jesse Barnesb5096402009-09-15 11:02:58 -07002472}
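/*
 * Illustrative usage sketch (editor's addition): both switches above are
 * typically flipped right after creating the buffer manager, before any
 * buffers are allocated ("fd" is an open DRM file descriptor assumed to
 * exist in the caller):
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 *
 * Note that enable_fenced_relocs() only takes effect on the execbuf2 path,
 * as the check against drm_intel_gem_bo_exec2 above shows.
 */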
2473
2474/**
Eric Anholt0e867312008-10-21 00:10:54 -07002475 * Return the additional aperture space required by the tree of buffer objects
2476 * rooted at bo.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002477 */
2478static int
Eric Anholt4b982642008-10-30 09:33:07 -07002479drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002480{
Eric Anholtd70d6052009-10-06 12:40:42 -07002481 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2482 int i;
2483 int total = 0;
Eric Anholt0e867312008-10-21 00:10:54 -07002484
Eric Anholtd70d6052009-10-06 12:40:42 -07002485 if (bo == NULL || bo_gem->included_in_check_aperture)
2486 return 0;
Eric Anholt0e867312008-10-21 00:10:54 -07002487
Eric Anholtd70d6052009-10-06 12:40:42 -07002488 total += bo->size;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002489 bo_gem->included_in_check_aperture = true;
Eric Anholt0e867312008-10-21 00:10:54 -07002490
Eric Anholtd70d6052009-10-06 12:40:42 -07002491 for (i = 0; i < bo_gem->reloc_count; i++)
2492 total +=
2493 drm_intel_gem_bo_get_aperture_space(bo_gem->
Jesse Barnesb5096402009-09-15 11:02:58 -07002494 reloc_target_info[i].bo);
Eric Anholt0e867312008-10-21 00:10:54 -07002495
Eric Anholtd70d6052009-10-06 12:40:42 -07002496 return total;
Eric Anholt0e867312008-10-21 00:10:54 -07002497}
2498
2499/**
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002500 * Count the number of buffers in this list that need a fence reg
2501 *
2502 * If the count is greater than the number of available regs, we'll have
2503 * to ask the caller to resubmit a batch with fewer tiled buffers.
2504 *
Eric Anholt9209c9a2009-01-27 16:54:11 -08002505 * This function over-counts if the same buffer is used multiple times.
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002506 */
2507static unsigned int
Eric Anholtd70d6052009-10-06 12:40:42 -07002508drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002509{
Eric Anholtd70d6052009-10-06 12:40:42 -07002510 int i;
2511 unsigned int total = 0;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002512
Eric Anholtd70d6052009-10-06 12:40:42 -07002513 for (i = 0; i < count; i++) {
2514 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002515
Eric Anholtd70d6052009-10-06 12:40:42 -07002516 if (bo_gem == NULL)
2517 continue;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002518
Eric Anholtd70d6052009-10-06 12:40:42 -07002519 total += bo_gem->reloc_tree_fences;
2520 }
2521 return total;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002522}
2523
2524/**
Eric Anholt4b982642008-10-30 09:33:07 -07002525 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2526 * for the next drm_intel_bufmgr_check_aperture_space() call.
Eric Anholt0e867312008-10-21 00:10:54 -07002527 */
2528static void
Eric Anholt4b982642008-10-30 09:33:07 -07002529drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
Eric Anholt0e867312008-10-21 00:10:54 -07002530{
Eric Anholtd70d6052009-10-06 12:40:42 -07002531 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2532 int i;
Eric Anholt0e867312008-10-21 00:10:54 -07002533
Eric Anholtd70d6052009-10-06 12:40:42 -07002534 if (bo == NULL || !bo_gem->included_in_check_aperture)
2535 return;
Eric Anholt0e867312008-10-21 00:10:54 -07002536
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002537 bo_gem->included_in_check_aperture = false;
Eric Anholt0e867312008-10-21 00:10:54 -07002538
Eric Anholtd70d6052009-10-06 12:40:42 -07002539 for (i = 0; i < bo_gem->reloc_count; i++)
2540 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
Jesse Barnesb5096402009-09-15 11:02:58 -07002541 reloc_target_info[i].bo);
Eric Anholt0e867312008-10-21 00:10:54 -07002542}
2543
2544/**
Keith Packardb13f4e12008-11-21 01:49:39 -08002545 * Return a conservative estimate for the amount of aperture required
2546 * for a collection of buffers. This may double-count some buffers.
2547 */
2548static unsigned int
2549drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2550{
Eric Anholtd70d6052009-10-06 12:40:42 -07002551 int i;
2552 unsigned int total = 0;
Keith Packardb13f4e12008-11-21 01:49:39 -08002553
Eric Anholtd70d6052009-10-06 12:40:42 -07002554 for (i = 0; i < count; i++) {
2555 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2556 if (bo_gem != NULL)
2557 total += bo_gem->reloc_tree_size;
2558 }
2559 return total;
Keith Packardb13f4e12008-11-21 01:49:39 -08002560}
2561
2562/**
2563 * Return the amount of aperture needed for a collection of buffers.
2564 * This avoids double counting any buffers, at the cost of looking
2565 * at every buffer in the set.
2566 */
2567static unsigned int
2568drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2569{
Eric Anholtd70d6052009-10-06 12:40:42 -07002570 int i;
2571 unsigned int total = 0;
Keith Packardb13f4e12008-11-21 01:49:39 -08002572
Eric Anholtd70d6052009-10-06 12:40:42 -07002573 for (i = 0; i < count; i++) {
2574 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2575 /* For the first buffer object in the array, we get an
2576 * accurate count back for its reloc_tree size (since nothing
2577 * had been flagged as being counted yet). We can save that
2578 * value out as a more conservative reloc_tree_size that
2579 * avoids double-counting target buffers. Since the first
2580 * buffer happens to usually be the batch buffer in our
2581 * callers, this can pull us back from doing the tree
2582 * walk on every new batch emit.
2583 */
2584 if (i == 0) {
2585 drm_intel_bo_gem *bo_gem =
2586 (drm_intel_bo_gem *) bo_array[i];
2587 bo_gem->reloc_tree_size = total;
2588 }
Eric Anholt7ce8d4c2009-02-27 13:46:31 -08002589 }
Keith Packardb13f4e12008-11-21 01:49:39 -08002590
Eric Anholtd70d6052009-10-06 12:40:42 -07002591 for (i = 0; i < count; i++)
2592 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2593 return total;
Keith Packardb13f4e12008-11-21 01:49:39 -08002594}
2595
2596/**
Eric Anholt0e867312008-10-21 00:10:54 -07002597 * Return -1 if the batchbuffer should be flushed before attempting to
2598 * emit rendering referencing the buffers pointed to by bo_array.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002599 *
Eric Anholt0e867312008-10-21 00:10:54 -07002600 * This is required because if we try to emit a batchbuffer with relocations
2601 * to a tree of buffers that won't simultaneously fit in the aperture,
2602 * the rendering will return an error at a point where the software is not
2603 * prepared to recover from it.
2604 *
2605 * However, we also want to emit the batchbuffer significantly before we reach
2606 * the limit, as a series of batchbuffers each of which references buffers
2607 * covering almost all of the aperture means that at each emit we end up
2608 * waiting to evict a buffer from the last rendering, and we get synchronous
2609 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2610 * get better parallelism.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002611 */
2612static int
Eric Anholt4b982642008-10-30 09:33:07 -07002613drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002614{
Eric Anholtd70d6052009-10-06 12:40:42 -07002615 drm_intel_bufmgr_gem *bufmgr_gem =
2616 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2617 unsigned int total = 0;
2618 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2619 int total_fences;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002620
Eric Anholtd70d6052009-10-06 12:40:42 -07002621 /* Check for fence reg constraints if necessary */
2622 if (bufmgr_gem->available_fences) {
2623 total_fences = drm_intel_gem_total_fences(bo_array, count);
2624 if (total_fences > bufmgr_gem->available_fences)
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002625 return -ENOSPC;
Eric Anholtd70d6052009-10-06 12:40:42 -07002626 }
Eric Anholt0e867312008-10-21 00:10:54 -07002627
Eric Anholtd70d6052009-10-06 12:40:42 -07002628 total = drm_intel_gem_estimate_batch_space(bo_array, count);
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002629
Eric Anholtd70d6052009-10-06 12:40:42 -07002630 if (total > threshold)
2631 total = drm_intel_gem_compute_batch_space(bo_array, count);
Eric Anholt0e867312008-10-21 00:10:54 -07002632
Eric Anholtd70d6052009-10-06 12:40:42 -07002633 if (total > threshold) {
2634 DBG("check_space: overflowed available aperture, "
2635 "%dkb vs %dkb\n",
2636 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002637 return -ENOSPC;
Eric Anholtd70d6052009-10-06 12:40:42 -07002638 } else {
2639 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2640 (int)bufmgr_gem->gtt_size / 1024);
2641 return 0;
2642 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07002643}
2644
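/* Illustrative usage sketch (not part of the original file): a batchbuffer
 * implementation calls the public drm_intel_bufmgr_check_aperture_space()
 * wrapper before emitting relocations, and on -ENOSPC submits what it has
 * and retries in a fresh batch.  "batch_bo", "target_bo" and "flush_batch()"
 * are hypothetical names.
 *
 *	drm_intel_bo *bos[2] = { batch_bo, target_bo };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, 2) != 0)
 *		flush_batch();
 */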
Keith Packard5b5ce302009-05-11 13:42:12 -07002645/*
2646 * Disable buffer reuse for objects which are shared with the kernel
2647 * as scanout buffers
2648 */
2649static int
2650drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2651{
Eric Anholtd70d6052009-10-06 12:40:42 -07002652 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Keith Packard5b5ce302009-05-11 13:42:12 -07002653
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002654 bo_gem->reusable = false;
Eric Anholtd70d6052009-10-06 12:40:42 -07002655 return 0;
Keith Packard5b5ce302009-05-11 13:42:12 -07002656}
2657
Eric Anholt769b1052009-10-01 19:09:26 -07002658static int
Chris Wilson07e75892010-05-11 08:54:06 +01002659drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2660{
2661 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2662
2663 return bo_gem->reusable;
2664}
2665
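/* Illustrative usage sketch (not part of the original file): a display
 * server marks a buffer handed to the kernel as a scanout through the
 * public drm_intel_bo_disable_reuse() wrapper; "scanout_bo" is hypothetical.
 *
 *	drm_intel_bo_disable_reuse(scanout_bo);
 *	assert(!drm_intel_bo_is_reusable(scanout_bo));
 */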
2666static int
Eric Anholt66d27142009-10-20 13:20:55 -07002667_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
Eric Anholt769b1052009-10-01 19:09:26 -07002668{
Eric Anholtd70d6052009-10-06 12:40:42 -07002669 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2670 int i;
Eric Anholt769b1052009-10-01 19:09:26 -07002671
Eric Anholtd70d6052009-10-06 12:40:42 -07002672 for (i = 0; i < bo_gem->reloc_count; i++) {
Jesse Barnesb5096402009-09-15 11:02:58 -07002673 if (bo_gem->reloc_target_info[i].bo == target_bo)
Eric Anholtd70d6052009-10-06 12:40:42 -07002674 return 1;
Eric Anholt4f7704a2010-06-10 08:58:08 -07002675 if (bo == bo_gem->reloc_target_info[i].bo)
2676 continue;
Jesse Barnesb5096402009-09-15 11:02:58 -07002677 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
Eric Anholtd70d6052009-10-06 12:40:42 -07002678 target_bo))
2679 return 1;
2680 }
2681
Eric Anholt769b1052009-10-01 19:09:26 -07002682 return 0;
Eric Anholt769b1052009-10-01 19:09:26 -07002683}
2684
Eric Anholt66d27142009-10-20 13:20:55 -07002685/** Return true if target_bo is referenced by bo's relocation tree. */
2686static int
2687drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2688{
2689 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2690
2691 if (bo == NULL || target_bo == NULL)
2692 return 0;
2693 if (target_bo_gem->used_as_reloc_target)
2694 return _drm_intel_gem_bo_references(bo, target_bo);
2695 return 0;
2696}
2697
Eric Anholt0ec768e2010-06-04 17:09:11 -07002698static void
2699add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2700{
2701 unsigned int i = bufmgr_gem->num_buckets;
2702
2703 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2704
2705 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2706 bufmgr_gem->cache_bucket[i].size = size;
2707 bufmgr_gem->num_buckets++;
2708}
2709
2710static void
2711init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2712{
2713 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2714
2715 /* OK, so power of two buckets was too wasteful of memory.
2716 * Give 3 other sizes between each power of two, to hopefully
2717 * cover things accurately enough. (The alternative is
2718 * probably to just go for exact matching of sizes, and assume
2719 * that for things like composited window resize the tiled
2720 * width/height alignment and rounding of sizes to pages will
2721 * get us useful cache hit rates anyway)
2722 */
2723 add_bucket(bufmgr_gem, 4096);
2724 add_bucket(bufmgr_gem, 4096 * 2);
2725 add_bucket(bufmgr_gem, 4096 * 3);
2726
2727 /* Initialize the linked lists for BO reuse cache. */
2728 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2729 add_bucket(bufmgr_gem, size);
2730
2731 add_bucket(bufmgr_gem, size + size * 1 / 4);
2732 add_bucket(bufmgr_gem, size + size * 2 / 4);
2733 add_bucket(bufmgr_gem, size + size * 3 / 4);
2734 }
2735}
2736
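/* For reference, the buckets created above work out to 4 KB, 8 KB and
 * 12 KB, then every power of two from 16 KB through 64 MB plus three
 * intermediate sizes after each (16, 20, 24, 28, 32, 40, 48, 56, 64 KB and
 * so on), the largest bucket being 64 MB + 48 MB = 112 MB.
 */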
Chris Wilsone4b60f22011-12-05 21:29:05 +00002737void
2738drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2739{
2740 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2741
2742 bufmgr_gem->vma_max = limit;
2743
2744 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2745}
2746
Eric Anholt769b1052009-10-01 19:09:26 -07002747/**
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002748 * Get the PCI ID for the device. This can be overridden by setting the
2749 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2750 */
2751static int
2752get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2753{
2754 char *devid_override;
2755 int devid;
2756 int ret;
2757 drm_i915_getparam_t gp;
2758
2759 if (geteuid() == getuid()) {
2760 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2761 if (devid_override) {
2762 bufmgr_gem->no_exec = true;
2763 return strtod(devid_override, NULL);
2764 }
2765 }
2766
Eric Anholt5de5b742012-03-13 16:49:53 -07002767 VG_CLEAR(devid);
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002768 VG_CLEAR(gp);
2769 gp.param = I915_PARAM_CHIPSET_ID;
2770 gp.value = &devid;
2771 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2772 if (ret) {
2773 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2774 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2775 }
2776 return devid;
2777}
2778
2779int
2780drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2781{
2782 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2783
2784 return bufmgr_gem->pci_device;
2785}
2786
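/* Illustrative usage sketch (not part of the original file): drivers use
 * the cached PCI ID with the IS_GEN* macros from intel_chipset.h to select
 * generation-specific code paths; "use_gen6_paths()" is hypothetical.
 *
 *	int devid = drm_intel_bufmgr_gem_get_devid(bufmgr);
 *
 *	if (IS_GEN6(devid))
 *		use_gen6_paths();
 */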
2787/**
Eric Anholt4db16a92011-10-11 15:59:03 -07002788 * Sets up AUB dumping.
2789 *
2790 * This is a trace file format that can be used with the simulator.
2791 * Packets are emitted in a format somewhat like GPU command packets.
2792 * You can set up a GTT and upload your objects into the referenced
2793 * space, then send off batchbuffers and get BMPs out the other end.
2794 */
2795void
2796drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2797{
2798 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2799 int entry = 0x200003;
2800 int i;
2801 int gtt_size = 0x10000;
2802
2803 if (!enable) {
2804 if (bufmgr_gem->aub_file) {
2805 fclose(bufmgr_gem->aub_file);
2806 bufmgr_gem->aub_file = NULL;
2807 }
		return;
2808	}
2809
2810 if (geteuid() != getuid())
2811 return;
2812
2813 bufmgr_gem->aub_file = fopen("intel.aub", "w+");
2814 if (!bufmgr_gem->aub_file)
2815 return;
2816
2817 /* Start allocating objects from just after the GTT. */
2818 bufmgr_gem->aub_offset = gtt_size;
2819
2820 /* Start with a (required) version packet. */
2821 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
2822 aub_out(bufmgr_gem,
2823 (4 << AUB_HEADER_MAJOR_SHIFT) |
2824 (0 << AUB_HEADER_MINOR_SHIFT));
2825 for (i = 0; i < 8; i++) {
2826 aub_out(bufmgr_gem, 0); /* app name */
2827 }
2828 aub_out(bufmgr_gem, 0); /* timestamp */
2829 aub_out(bufmgr_gem, 0); /* timestamp */
2830 aub_out(bufmgr_gem, 0); /* comment len */
2831
2832 /* Set up the GTT. The max we can handle is 256M */
2833 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
2834 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
2835 aub_out(bufmgr_gem, 0); /* subtype */
2836 aub_out(bufmgr_gem, 0); /* offset */
2837 aub_out(bufmgr_gem, gtt_size); /* size */
2838 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
2839 aub_out(bufmgr_gem, entry);
2840 }
2841}
2842
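/* Illustrative usage sketch (not part of the original file): AUB dumping is
 * normally switched on from a debug path right after bufmgr creation, so
 * that every later allocation lands in the "intel.aub" trace; the
 * environment variable name here is hypothetical.
 *
 *	if (getenv("MY_DRIVER_DUMP_AUB") != NULL)
 *		drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 */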
Ben Widawskyf7210fa2012-01-13 11:31:52 -08002843drm_intel_context *
2844drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
2845{
2846 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2847 struct drm_i915_gem_context_create create;
2849	drm_intel_context *context = NULL;
2850	int ret;
2851
	VG_CLEAR(create);
2852	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2853 if (ret != 0) {
Kenneth Graunke992e2af2012-07-12 13:41:11 -07002854 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
2855 strerror(errno));
Ben Widawskyf7210fa2012-01-13 11:31:52 -08002856 return NULL;
2857 }
2858
2859 context = calloc(1, sizeof(*context));
	if (context == NULL)
		return NULL;
2860	context->ctx_id = create.ctx_id;
2861 context->bufmgr = bufmgr;
2862
2863 return context;
2864}
2865
2866void
2867drm_intel_gem_context_destroy(drm_intel_context *ctx)
2868{
2869 drm_intel_bufmgr_gem *bufmgr_gem;
2870 struct drm_i915_gem_context_destroy destroy;
2871 int ret;
2872
2873 if (ctx == NULL)
2874 return;
2875
2876 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
2877 destroy.ctx_id = ctx->ctx_id;
2878 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
2879 &destroy);
2880 if (ret != 0)
2881 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
2882 strerror(errno));
2883
2884 free(ctx);
2885}
2886
2887
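/* Illustrative usage sketch (not part of the original file): creating a
 * hardware context and releasing it when done.  How batches are submitted
 * against the context depends on the caller's execbuffer path.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx == NULL)
 *		return;
 *	...
 *	drm_intel_gem_context_destroy(ctx);
 */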
Eric Anholt4db16a92011-10-11 15:59:03 -07002888/**
Paul Berryda02f722012-05-04 12:41:00 -07002889 * Annotate the given bo for use in aub dumping.
2890 *
2891 * \param annotations is an array of drm_intel_aub_annotation objects
2892 * describing the type of data in various sections of the bo. Each
2893 * element of the array specifies the type and subtype of a section of
2894 * the bo, and the past-the-end offset of that section. The elements
2895 * of \c annotations must be sorted so that ending_offset is
2896 * increasing.
2897 *
2898 * \param count is the number of elements in the \c annotations array.
2899 * If \c count is zero, then \c annotations will not be dereferenced.
2900 *
2901 * Annotations are copied into a private data structure, so caller may
2902 * re-use the memory pointed to by \c annotations after the call
2903 * returns.
2904 *
2905 * Annotations are stored for the lifetime of the bo; to reset to the
2906 * default state (no annotations), call this function with a \c count
2907 * of zero.
2908 */
2909void
2910drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
2911 drm_intel_aub_annotation *annotations,
2912 unsigned count)
2913{
2914 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2915 unsigned size = sizeof(*annotations) * count;
2916 drm_intel_aub_annotation *new_annotations =
2917 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
2918 if (new_annotations == NULL) {
2919 free(bo_gem->aub_annotations);
2920 bo_gem->aub_annotations = NULL;
2921 bo_gem->aub_annotation_count = 0;
2922 return;
2923 }
2924 memcpy(new_annotations, annotations, size);
2925 bo_gem->aub_annotations = new_annotations;
2926 bo_gem->aub_annotation_count = count;
2927}
2928
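/* Illustrative usage sketch (not part of the original file): annotating a
 * batchbuffer before submission so the AUB dump can label its contents.
 * The AUB_TRACE_TYPE_* values are assumed to come from intel_aub.h;
 * "batch_bo" and "used" (bytes of commands written) are hypothetical.
 *
 *	drm_intel_aub_annotation notes[] = {
 *		{ AUB_TRACE_TYPE_BATCH,  0, used },
 *		{ AUB_TRACE_TYPE_NOTYPE, 0, batch_bo->size },
 *	};
 *	drm_intel_bufmgr_gem_set_aub_annotations(batch_bo, notes, 2);
 */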
2929/**
Eric Anholt6a9eb082008-06-03 09:27:37 -07002930 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
2931 * and manage buffer objects.
2932 *
2933 * \param fd File descriptor of the opened DRM device.
2934 */
Eric Anholt4b982642008-10-30 09:33:07 -07002935drm_intel_bufmgr *
2936drm_intel_bufmgr_gem_init(int fd, int batch_size)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002937{
Eric Anholtd70d6052009-10-06 12:40:42 -07002938 drm_intel_bufmgr_gem *bufmgr_gem;
2939 struct drm_i915_gem_get_aperture aperture;
2940 drm_i915_getparam_t gp;
Daniel Vetter630dd262011-09-22 22:20:09 +02002941 int ret, tmp;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002942 bool exec2 = false;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002943
Eric Anholtd70d6052009-10-06 12:40:42 -07002944 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
Dave Airlie973d8d62010-02-02 10:57:12 +10002945 if (bufmgr_gem == NULL)
2946 return NULL;
2947
Eric Anholtd70d6052009-10-06 12:40:42 -07002948 bufmgr_gem->fd = fd;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002949
Eric Anholtd70d6052009-10-06 12:40:42 -07002950 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
2951 free(bufmgr_gem);
2952 return NULL;
2953 }
Eric Anholt6df7b072008-06-12 23:22:26 -07002954
Chris Wilson62997222010-09-25 21:32:59 +01002955 ret = drmIoctl(bufmgr_gem->fd,
2956 DRM_IOCTL_I915_GEM_GET_APERTURE,
2957 &aperture);
Eric Anholt0e867312008-10-21 00:10:54 -07002958
Eric Anholtd70d6052009-10-06 12:40:42 -07002959 if (ret == 0)
2960 bufmgr_gem->gtt_size = aperture.aper_available_size;
2961 else {
2962 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
2963 strerror(errno));
2964 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2965 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2966 "May lead to reduced performance or incorrect "
2967 "rendering.\n",
2968 (int)bufmgr_gem->gtt_size / 1024);
2969 }
Eric Anholt0e867312008-10-21 00:10:54 -07002970
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002971 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002972
Eric Anholt078bc5b2011-12-20 13:10:36 -08002973 if (IS_GEN2(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002974 bufmgr_gem->gen = 2;
Eric Anholt078bc5b2011-12-20 13:10:36 -08002975 else if (IS_GEN3(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002976 bufmgr_gem->gen = 3;
Eric Anholt078bc5b2011-12-20 13:10:36 -08002977 else if (IS_GEN4(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002978 bufmgr_gem->gen = 4;
Chad Versace592ac672012-01-27 10:02:16 -08002979 else if (IS_GEN5(bufmgr_gem->pci_device))
2980 bufmgr_gem->gen = 5;
2981 else if (IS_GEN6(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08002982 bufmgr_gem->gen = 6;
Chad Versace592ac672012-01-27 10:02:16 -08002983 else if (IS_GEN7(bufmgr_gem->pci_device))
2984 bufmgr_gem->gen = 7;
2985 else
2986 assert(0);
Eric Anholta1f9ea72010-03-02 08:49:36 -08002987
Eric Anholt078bc5b2011-12-20 13:10:36 -08002988 if (IS_GEN3(bufmgr_gem->pci_device) &&
2989 bufmgr_gem->gtt_size > 256*1024*1024) {
Daniel Vetter36cff1c2011-12-04 12:51:45 +01002990 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
2991 * be used for tiled blits. To simplify the accounting, just
2992 * subtract the unmappable part (fixed to 256MB on all known
2993 * gen3 devices) if the kernel advertises it. */
2994 bufmgr_gem->gtt_size -= 256*1024*1024;
2995 }
2996
Eric Anholt5de5b742012-03-13 16:49:53 -07002997 VG_CLEAR(gp);
Daniel Vetter630dd262011-09-22 22:20:09 +02002998 gp.value = &tmp;
2999
Jesse Barnesb5096402009-09-15 11:02:58 -07003000 gp.param = I915_PARAM_HAS_EXECBUF2;
Chris Wilson62997222010-09-25 21:32:59 +01003001 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Jesse Barnesb5096402009-09-15 11:02:58 -07003002 if (!ret)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07003003 exec2 = true;
Jesse Barnesb5096402009-09-15 11:02:58 -07003004
Zou Nan hai66375fd2010-06-02 10:07:37 +08003005 gp.param = I915_PARAM_HAS_BSD;
Chris Wilson62997222010-09-25 21:32:59 +01003006 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Chris Wilson057fab32010-10-26 11:35:11 +01003007 bufmgr_gem->has_bsd = ret == 0;
3008
3009 gp.param = I915_PARAM_HAS_BLT;
3010 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3011 bufmgr_gem->has_blt = ret == 0;
Zou Nan hai66375fd2010-06-02 10:07:37 +08003012
Chris Wilson36245772010-10-29 10:49:54 +01003013 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3014 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3015 bufmgr_gem->has_relaxed_fencing = ret == 0;
3016
Ben Widawsky971c0802012-06-05 11:30:48 -07003017 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3018 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3019 bufmgr_gem->has_wait_timeout = ret == 0;
3020
Eugeni Dodonov151cdcf2012-01-17 15:20:19 -02003021 gp.param = I915_PARAM_HAS_LLC;
3022 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Eric Anholt3a888482012-02-27 17:26:05 -08003023 if (ret != 0) {
Eugeni Dodonov151cdcf2012-01-17 15:20:19 -02003024		/* Kernel does not support the HAS_LLC query; fall back to GPU
3025 * generation detection and assume that we have LLC on GEN6/7
3026 */
3027 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3028 IS_GEN7(bufmgr_gem->pci_device));
3029 } else
3030		bufmgr_gem->has_llc = *gp.value;
3031
Eric Anholta1f9ea72010-03-02 08:49:36 -08003032 if (bufmgr_gem->gen < 4) {
Eric Anholtd70d6052009-10-06 12:40:42 -07003033 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3034 gp.value = &bufmgr_gem->available_fences;
Chris Wilson62997222010-09-25 21:32:59 +01003035 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Eric Anholtd70d6052009-10-06 12:40:42 -07003036 if (ret) {
3037 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3038 errno);
3039 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3040 *gp.value);
3041 bufmgr_gem->available_fences = 0;
Chris Wilsonfdcde592010-02-09 08:32:54 +00003042 } else {
3043 /* XXX The kernel reports the total number of fences,
3044 * including any that may be pinned.
3045 *
3046 * We presume that there will be at least one pinned
3047 * fence for the scanout buffer, but there may be more
3048 * than one scanout and the user may be manually
3049 * pinning buffers. Let's move to execbuffer2 and
3050 * thereby forget the insanity of using fences...
3051 */
3052 bufmgr_gem->available_fences -= 2;
3053 if (bufmgr_gem->available_fences < 0)
3054 bufmgr_gem->available_fences = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07003055 }
3056 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07003057
Eric Anholtd70d6052009-10-06 12:40:42 -07003058 /* Let's go with one relocation per every 2 dwords (but round down a bit
3059 * since a power of two will mean an extra page allocation for the reloc
3060 * buffer).
3061 *
3062 * Every 4 was too few for the blender benchmark.
3063 */
3064 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
Eric Anholt769b1052009-10-01 19:09:26 -07003065
Eric Anholtd70d6052009-10-06 12:40:42 -07003066 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3067 bufmgr_gem->bufmgr.bo_alloc_for_render =
3068 drm_intel_gem_bo_alloc_for_render;
Jesse Barnes3a7dfcd2009-10-06 14:34:06 -07003069 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
Eric Anholtd70d6052009-10-06 12:40:42 -07003070 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3071 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3072 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3073 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3074 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3075 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3076 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3077 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
Jesse Barnesb5096402009-09-15 11:02:58 -07003078 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
Eric Anholtd70d6052009-10-06 12:40:42 -07003079 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3080 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3081 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3082 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3083 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
Jesse Barnesb5096402009-09-15 11:02:58 -07003084 /* Use the new one if available */
Zou Nan hai66375fd2010-06-02 10:07:37 +08003085 if (exec2) {
Jesse Barnesb5096402009-09-15 11:02:58 -07003086 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
Albert Damen49447a92010-11-07 15:54:32 +01003087 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
Zou Nan hai66375fd2010-06-02 10:07:37 +08003088 } else
Jesse Barnesb5096402009-09-15 11:02:58 -07003089 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
Eric Anholtd70d6052009-10-06 12:40:42 -07003090 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
Chris Wilson83a35b62009-11-11 13:04:38 +00003091 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
Eric Anholtd70d6052009-10-06 12:40:42 -07003092 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
3093 bufmgr_gem->bufmgr.debug = 0;
3094 bufmgr_gem->bufmgr.check_aperture_space =
3095 drm_intel_gem_check_aperture_space;
3096 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
Chris Wilson07e75892010-05-11 08:54:06 +01003097 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
Eric Anholtd70d6052009-10-06 12:40:42 -07003098 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3099 drm_intel_gem_get_pipe_from_crtc_id;
3100 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
Eric Anholt6a9eb082008-06-03 09:27:37 -07003101
Chris Wilson36d49392011-02-14 09:39:06 +00003102 DRMINITLISTHEAD(&bufmgr_gem->named);
Eric Anholt0ec768e2010-06-04 17:09:11 -07003103 init_cache_buckets(bufmgr_gem);
Eric Anholtd70d6052009-10-06 12:40:42 -07003104
Chris Wilsone4b60f22011-12-05 21:29:05 +00003105 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3106 bufmgr_gem->vma_max = -1; /* unlimited by default */
3107
Eric Anholtd70d6052009-10-06 12:40:42 -07003108 return &bufmgr_gem->bufmgr;
Eric Anholt6a9eb082008-06-03 09:27:37 -07003109}
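
/* Illustrative usage sketch (not part of the original file): minimal
 * bring-up and teardown of the GEM buffer manager.  The device path and
 * the 16 KB batch size are assumptions for the example.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *
 *	if (bufmgr == NULL)
 *		return -1;
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	...
 *	drm_intel_bufmgr_destroy(bufmgr);
 *	close(fd);
 */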