/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

#include "i915_drm.h"
#include "uthash.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

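/* Zero a struct variable (passed by name, not by pointer); used to clear
 * ioctl argument structs before filling them in.
 */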
#define memclear(s) memset(&s, 0, sizeof(s))

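/* Debug printf, emitted only when the bufmgr's debug flag is set; relies on a
 * bufmgr_gem variable being in scope at the call site.
 */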
#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	atomic_t refcount;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead managers;

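	/** uthash tables mapping flink names / GEM handles to their bo_gem */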
	drm_intel_bo_gem *name_table;
	drm_intel_bo_gem *handle_table;

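	/** Cache of CPU/GTT mmaps kept after unmap; its size is bounded by vma_max */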
	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	unsigned int has_exec_async : 1;
	bool fenced_relocs;

	struct {
		void *ptr;
		uint32_t handle;
	} userptr_active;

} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;

	UT_hash_handle handle_hh;
	UT_hash_handle name_hh;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

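	/** Extra kernel execobject flags (EXEC_OBJECT_*) ORed into this BO's execbuf entry */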
	unsigned long kflags;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_intel_bo **softpin_target;
	/** Number of softpinned BOs that are referenced by this buffer */
	int softpin_target_count;
	/** Maximum number of softpinned BOs that can be referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
	return (drm_intel_bo_gem *)bo;
}

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

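	/* Round up to the next power-of-two size, starting from min_size. */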
	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fallback to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

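/*
 * Cache buckets are kept in order of increasing size, so the first bucket
 * large enough for the request is the best fit.
 */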
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
			    "%d (%s)@0x%08x %08x + 0x%08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    upper_32_bits(bo_gem->relocs[j].offset),
			    lower_32_bits(bo_gem->relocs[j].offset),
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64),
			    bo_gem->relocs[j].delta);
		}

		for (j = 0; j < bo_gem->softpin_target_count; j++) {
			drm_intel_bo *target_bo = bo_gem->softpin_target[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;
			DBG("%2d: %d %s(%s) -> "
			    "%d *(%s)@0x%08x %08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64));
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = bo->align;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;
	unsigned long flags;

	flags = 0;
	if (need_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
	bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

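/* Size in bytes of a relocation buffer holding x relocation entries
 * (header dwords plus x entries of I915_RELOC0_STRIDE dwords each).
 */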
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffer to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		unsigned int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		alignment = MAX2(alignment, min_size);
	}

	bo_gem->reloc_tree_size = size + alignment;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

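	/* Cap the relocation array: a BO of N bytes has at most N/4 distinct
	 * 32-bit locations that relocations could patch.
	 */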
	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0) {
		bo_gem->idle = !busy.busy;
		return busy.busy;
	} else {
		return false;
	}
}

static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			goto err;

		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
		   list (vma_list), so better set the list head here */
		DRMINITLISTHEAD(&bo_gem->vma_list);

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		if (ret != 0) {
			free(bo_gem);
			goto err;
		}

		bo_gem->gem_handle = create.handle;
		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
			 gem_handle, sizeof(bo_gem->gem_handle),
			 bo_gem);

		bo_gem->bo.handle = bo_gem->gem_handle;
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride))
			goto err_free;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;

err_free:
	drm_intel_gem_bo_free(&bo_gem->bo);
err:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

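	/* The pitch/size helpers below may downgrade *tiling_mode (e.g. to
	 * I915_TILING_NONE); repeat until the chosen mode is stable.
	 */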
	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
			       const char *name,
			       void *addr,
			       uint32_t tiling_mode,
			       uint32_t stride,
			       unsigned long size,
			       unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_userptr userptr;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware so refuse it for the time being.
	 */
	if (tiling_mode != I915_TILING_NONE)
		return NULL;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = size;

	memclear(userptr);
	userptr.user_ptr = (__u64)((unsigned long)addr);
	userptr.user_size = size;
	userptr.flags = flags;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_USERPTR,
		       &userptr);
	if (ret != 0) {
		DBG("bo_create_userptr: "
		    "ioctl failed with user ptr %p size 0x%lx, "
		    "user flags 0x%lx\n", addr, size, flags);
		free(bo_gem);
		return NULL;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	bo_gem->gem_handle = userptr.handle;
	bo_gem->bo.handle = bo_gem->gem_handle;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->is_userptr = true;
	bo_gem->bo.virtual = addr;
	/* Save the address provided by user */
	bo_gem->user_virtual = addr;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->stride = 0;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle),
		 bo_gem);

	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create_userptr: "
	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
	    addr, bo_gem->gem_handle, bo_gem->name,
	    size, stride, tiling_mode);

	return &bo_gem->bo;
}

static bool
has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int ret;
	void *ptr;
	long pgsz;
	struct drm_i915_gem_userptr userptr;

	pgsz = sysconf(_SC_PAGESIZE);
	assert(pgsz > 0);

	ret = posix_memalign(&ptr, pgsz, pgsz);
	if (ret) {
		DBG("Failed to get a page (%ld) for userptr detection!\n",
		    pgsz);
		return false;
	}

	memclear(userptr);
	userptr.user_ptr = (__u64)(unsigned long)ptr;
	userptr.user_size = pgsz;

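	/* Probe with no flags first; if the kernel rejects that with ENODEV,
	 * retry asking for an unsynchronized mapping before giving up.
	 */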
retry:
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret) {
		if (errno == ENODEV && userptr.flags == 0) {
			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
			goto retry;
		}
		free(ptr);
		return false;
	}

	/* We don't release the userptr bo here as we want to keep the
	 * kernel mm tracking alive for our lifetime. The first time we
	 * create a userptr object the kernel has to install a mmu_notifier
	 * which is a heavyweight operation (e.g. it requires taking all
	 * mm_locks and stop_machine()).
	 */

	bufmgr_gem->userptr_active.ptr = ptr;
	bufmgr_gem->userptr_active.handle = userptr.handle;

	return true;
}

static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	/* At the moment most applications only have a few named bo.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	HASH_FIND(name_hh, bufmgr_gem->name_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		bo_gem = NULL;
		goto out;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
	HASH_ADD(name_hh, bufmgr_gem->name_table,
		 global_name, sizeof(bo_gem->global_name), bo_gem);

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0)
		goto err_unref;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err_unref:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	if (bo_gem->global_name)
		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

Chris Wilson23eeb7e2012-02-09 10:29:22 +00001216static void
1217drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1218{
1219#if HAVE_VALGRIND
1220 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1221
1222 if (bo_gem->mem_virtual)
1223 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1224
Chris Wilson455e9b42015-05-01 13:39:55 +01001225 if (bo_gem->wc_virtual)
1226 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1227
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001228 if (bo_gem->gtt_virtual)
1229 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1230#endif
1231}
1232
Eric Anholt3f3c5be2009-07-09 17:49:46 -07001233/** Frees all cached buffers significantly older than @time. */
1234static void
1235drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1236{
Chris Wilson04495ee2009-10-02 04:39:22 +01001237 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001238
Chris Wilsonf16b4162010-06-21 15:21:48 +01001239 if (bufmgr_gem->time == time)
1240 return;
1241
Eric Anholt0ec768e2010-06-04 17:09:11 -07001242 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
Eric Anholtd70d6052009-10-06 12:40:42 -07001243 struct drm_intel_gem_bo_bucket *bucket =
1244 &bufmgr_gem->cache_bucket[i];
Chris Wilson04495ee2009-10-02 04:39:22 +01001245
Eric Anholtd70d6052009-10-06 12:40:42 -07001246 while (!DRMLISTEMPTY(&bucket->head)) {
1247 drm_intel_bo_gem *bo_gem;
Chris Wilson04495ee2009-10-02 04:39:22 +01001248
Eric Anholtd70d6052009-10-06 12:40:42 -07001249 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1250 bucket->head.next, head);
1251 if (time - bo_gem->free_time <= 1)
1252 break;
Chris Wilson04495ee2009-10-02 04:39:22 +01001253
Eric Anholtd70d6052009-10-06 12:40:42 -07001254 DRMLISTDEL(&bo_gem->head);
Chris Wilson04495ee2009-10-02 04:39:22 +01001255
Eric Anholtd70d6052009-10-06 12:40:42 -07001256 drm_intel_gem_bo_free(&bo_gem->bo);
1257 }
1258 }
Chris Wilsonf16b4162010-06-21 15:21:48 +01001259
1260 bufmgr_gem->time = time;
Chris Wilson04495ee2009-10-02 04:39:22 +01001261}
1262
Chris Wilsone4b60f22011-12-05 21:29:05 +00001263static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1264{
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001265 int limit;
1266
1267 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1268 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001269
1270 if (bufmgr_gem->vma_max < 0)
1271 return;
1272
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001273 /* We may need to evict a few entries in order to create new mmaps */
1274 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1275 if (limit < 0)
1276 limit = 0;
1277
1278 while (bufmgr_gem->vma_count > limit) {
Chris Wilsone4b60f22011-12-05 21:29:05 +00001279 drm_intel_bo_gem *bo_gem;
1280
1281 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1282 bufmgr_gem->vma_cache.next,
1283 vma_list);
1284 assert(bo_gem->map_count == 0);
Chris Wilson0ab22512011-12-14 08:20:10 +00001285 DRMLISTDELINIT(&bo_gem->vma_list);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001286
1287 if (bo_gem->mem_virtual) {
Emil Velikov537b1ca2014-09-07 19:47:06 +01001288 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001289 bo_gem->mem_virtual = NULL;
1290 bufmgr_gem->vma_count--;
1291 }
Chris Wilson455e9b42015-05-01 13:39:55 +01001292 if (bo_gem->wc_virtual) {
1293 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1294 bo_gem->wc_virtual = NULL;
1295 bufmgr_gem->vma_count--;
1296 }
Chris Wilsone4b60f22011-12-05 21:29:05 +00001297 if (bo_gem->gtt_virtual) {
Emil Velikov537b1ca2014-09-07 19:47:06 +01001298 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001299 bo_gem->gtt_virtual = NULL;
1300 bufmgr_gem->vma_count--;
1301 }
1302 }
1303}
1304
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001305static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1306 drm_intel_bo_gem *bo_gem)
Chris Wilsone4b60f22011-12-05 21:29:05 +00001307{
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001308 bufmgr_gem->vma_open--;
Chris Wilsone4b60f22011-12-05 21:29:05 +00001309 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1310 if (bo_gem->mem_virtual)
1311 bufmgr_gem->vma_count++;
Chris Wilson455e9b42015-05-01 13:39:55 +01001312 if (bo_gem->wc_virtual)
1313 bufmgr_gem->vma_count++;
Chris Wilsone4b60f22011-12-05 21:29:05 +00001314 if (bo_gem->gtt_virtual)
1315 bufmgr_gem->vma_count++;
1316 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1317}
1318
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001319static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1320 drm_intel_bo_gem *bo_gem)
Chris Wilsone4b60f22011-12-05 21:29:05 +00001321{
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001322 bufmgr_gem->vma_open++;
Chris Wilsone4b60f22011-12-05 21:29:05 +00001323 DRMLISTDEL(&bo_gem->vma_list);
1324 if (bo_gem->mem_virtual)
1325 bufmgr_gem->vma_count--;
Chris Wilson455e9b42015-05-01 13:39:55 +01001326 if (bo_gem->wc_virtual)
1327 bufmgr_gem->vma_count--;
Chris Wilsone4b60f22011-12-05 21:29:05 +00001328 if (bo_gem->gtt_virtual)
1329 bufmgr_gem->vma_count--;
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001330 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001331}
1332
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001333static void
1334drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
Chris Wilson04495ee2009-10-02 04:39:22 +01001335{
Eric Anholtd70d6052009-10-06 12:40:42 -07001336 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1337 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1338 struct drm_intel_gem_bo_bucket *bucket;
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001339 int i;
Chris Wilson04495ee2009-10-02 04:39:22 +01001340
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001341 /* Unreference all the target buffers */
1342 for (i = 0; i < bo_gem->reloc_count; i++) {
Eric Anholt4f7704a2010-06-10 08:58:08 -07001343 if (bo_gem->reloc_target_info[i].bo != bo) {
1344 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1345 reloc_target_info[i].bo,
1346 time);
1347 }
Eric Anholtd70d6052009-10-06 12:40:42 -07001348 }
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02001349 for (i = 0; i < bo_gem->softpin_target_count; i++)
1350 drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1351 time);
Chris Wilson1bd35da2016-08-20 18:36:42 +01001352 bo_gem->kflags = 0;
Chris Wilsonb666f412009-11-30 23:07:19 +00001353 bo_gem->reloc_count = 0;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001354 bo_gem->used_as_reloc_target = false;
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02001355 bo_gem->softpin_target_count = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07001356
1357 DBG("bo_unreference final: %d (%s)\n",
1358 bo_gem->gem_handle, bo_gem->name);
1359
Chris Wilson57473c72009-12-02 13:36:22 +00001360 /* release memory associated with this object */
Jesse Barnesb5096402009-09-15 11:02:58 -07001361 if (bo_gem->reloc_target_info) {
1362 free(bo_gem->reloc_target_info);
1363 bo_gem->reloc_target_info = NULL;
Chris Wilson57473c72009-12-02 13:36:22 +00001364 }
1365 if (bo_gem->relocs) {
1366 free(bo_gem->relocs);
1367 bo_gem->relocs = NULL;
1368 }
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02001369 if (bo_gem->softpin_target) {
1370 free(bo_gem->softpin_target);
1371 bo_gem->softpin_target = NULL;
1372 bo_gem->softpin_target_size = 0;
1373 }
Chris Wilson57473c72009-12-02 13:36:22 +00001374
Chris Wilson5c5332b2011-12-05 10:39:49 +00001375 /* Clear any left-over mappings */
1376 if (bo_gem->map_count) {
1377 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1378 bo_gem->map_count = 0;
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001379 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001380 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
Chris Wilson5c5332b2011-12-05 10:39:49 +00001381 }
Chris Wilson5c5332b2011-12-05 10:39:49 +00001382
Eric Anholtd70d6052009-10-06 12:40:42 -07001383 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1384 /* Put the buffer into our internal cache for reuse if we can. */
Eric Anholtd70d6052009-10-06 12:40:42 -07001385 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
Chris Wilson60aa8032009-11-30 20:02:05 +00001386 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1387 I915_MADV_DONTNEED)) {
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001388 bo_gem->free_time = time;
Eric Anholtd70d6052009-10-06 12:40:42 -07001389
1390 bo_gem->name = NULL;
1391 bo_gem->validate_index = -1;
Eric Anholtd70d6052009-10-06 12:40:42 -07001392
1393 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
Eric Anholtd70d6052009-10-06 12:40:42 -07001394 } else {
1395 drm_intel_gem_bo_free(bo);
1396 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001397}
1398
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001399static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1400 time_t time)
1401{
1402 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1403
1404 assert(atomic_read(&bo_gem->refcount) > 0);
Eric Anholtd70d6052009-10-06 12:40:42 -07001405 if (atomic_dec_and_test(&bo_gem->refcount))
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001406 drm_intel_gem_bo_unreference_final(bo, time);
Eric Anholtd70d6052009-10-06 12:40:42 -07001407}
1408
1409static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1410{
1411 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1412
1413 assert(atomic_read(&bo_gem->refcount) > 0);
Lionel Landwerlin88025ad2014-09-12 13:48:37 +01001414
1415 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
Eric Anholtd70d6052009-10-06 12:40:42 -07001416 drm_intel_bufmgr_gem *bufmgr_gem =
1417 (drm_intel_bufmgr_gem *) bo->bufmgr;
Eric Anholt0d7ad7e2009-10-20 14:19:38 -07001418 struct timespec time;
1419
1420 clock_gettime(CLOCK_MONOTONIC, &time);
1421
Eric Anholtd70d6052009-10-06 12:40:42 -07001422 pthread_mutex_lock(&bufmgr_gem->lock);
Lionel Landwerlin88025ad2014-09-12 13:48:37 +01001423
1424 if (atomic_dec_and_test(&bo_gem->refcount)) {
1425 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1426 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1427 }
1428
Eric Anholtd70d6052009-10-06 12:40:42 -07001429 pthread_mutex_unlock(&bufmgr_gem->lock);
1430 }
1431}
1432
1433static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1434{
1435 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1436 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1437 struct drm_i915_gem_set_domain set_domain;
1438 int ret;
1439
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01001440 if (bo_gem->is_userptr) {
1441 /* Return the same user ptr */
1442 bo->virtual = bo_gem->user_virtual;
1443 return 0;
1444 }
1445
Chris Wilsona3305b02010-05-13 08:24:28 +01001446 pthread_mutex_lock(&bufmgr_gem->lock);
1447
Chris Wilsone4b60f22011-12-05 21:29:05 +00001448 if (bo_gem->map_count++ == 0)
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001449 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001450
Eric Anholtd70d6052009-10-06 12:40:42 -07001451 if (!bo_gem->mem_virtual) {
1452 struct drm_i915_gem_mmap mmap_arg;
Carl Worthafd245d2009-04-29 14:43:55 -07001453
Chris Wilson015286f2011-12-11 17:35:06 +00001454 DBG("bo_map: %d (%s), map_count=%d\n",
1455 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
Eric Anholtd70d6052009-10-06 12:40:42 -07001456
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001457 memclear(mmap_arg);
Eric Anholtd70d6052009-10-06 12:40:42 -07001458 mmap_arg.handle = bo_gem->gem_handle;
Eric Anholtd70d6052009-10-06 12:40:42 -07001459 mmap_arg.size = bo->size;
Chris Wilson62997222010-09-25 21:32:59 +01001460 ret = drmIoctl(bufmgr_gem->fd,
1461 DRM_IOCTL_I915_GEM_MMAP,
1462 &mmap_arg);
Eric Anholtd70d6052009-10-06 12:40:42 -07001463 if (ret != 0) {
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001464 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001465 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1466 __FILE__, __LINE__, bo_gem->gem_handle,
1467 bo_gem->name, strerror(errno));
Chris Wilsone4b60f22011-12-05 21:29:05 +00001468 if (--bo_gem->map_count == 0)
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001469 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilsona3305b02010-05-13 08:24:28 +01001470 pthread_mutex_unlock(&bufmgr_gem->lock);
Eric Anholtd70d6052009-10-06 12:40:42 -07001471 return ret;
1472 }
Chris Wilson90b23cc2012-02-09 10:23:10 +00001473 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
Eric Anholtd70d6052009-10-06 12:40:42 -07001474 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1475 }
1476 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1477 bo_gem->mem_virtual);
1478 bo->virtual = bo_gem->mem_virtual;
1479
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001480 memclear(set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001481 set_domain.handle = bo_gem->gem_handle;
1482 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1483 if (write_enable)
1484 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1485 else
1486 set_domain.write_domain = 0;
Chris Wilson62997222010-09-25 21:32:59 +01001487 ret = drmIoctl(bufmgr_gem->fd,
1488 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1489 &set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001490 if (ret != 0) {
Chris Wilson96214862010-10-01 16:50:09 +01001491 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1492 __FILE__, __LINE__, bo_gem->gem_handle,
1493 strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001494 }
1495
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001496 if (write_enable)
1497 bo_gem->mapped_cpu_write = true;
1498
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001499 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1500 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
Chris Wilsona3305b02010-05-13 08:24:28 +01001501 pthread_mutex_unlock(&bufmgr_gem->lock);
1502
Eric Anholtd70d6052009-10-06 12:40:42 -07001503 return 0;
1504}
1505
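/*
 * Illustrative sketch: one way a caller might drive the CPU mapping path
 * above through the public drm_intel_bo_* wrappers.  The buffer name and
 * size here are hypothetical.
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	if (bo && drm_intel_bo_map(bo, 1) == 0) {	// write_enable = 1
 *		memset(bo->virtual, 0, bo->size);	// CPU-domain write
 *		drm_intel_bo_unmap(bo);
 *	}
 *	drm_intel_bo_unreference(bo);
 */
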
Eric Anholt99c73372012-02-10 04:12:15 -08001506static int
1507map_gtt(drm_intel_bo *bo)
Eric Anholtd70d6052009-10-06 12:40:42 -07001508{
1509 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1510 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholtd70d6052009-10-06 12:40:42 -07001511 int ret;
1512
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01001513 if (bo_gem->is_userptr)
1514 return -EINVAL;
1515
Chris Wilsone4b60f22011-12-05 21:29:05 +00001516 if (bo_gem->map_count++ == 0)
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001517 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
Chris Wilsone4b60f22011-12-05 21:29:05 +00001518
Eric Anholtd70d6052009-10-06 12:40:42 -07001519 /* Get a mapping of the buffer if we haven't before. */
1520 if (bo_gem->gtt_virtual == NULL) {
1521 struct drm_i915_gem_mmap_gtt mmap_arg;
1522
Chris Wilson015286f2011-12-11 17:35:06 +00001523 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1524 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
Eric Anholtd70d6052009-10-06 12:40:42 -07001525
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001526 memclear(mmap_arg);
Eric Anholtd70d6052009-10-06 12:40:42 -07001527 mmap_arg.handle = bo_gem->gem_handle;
1528
1529 /* Get the fake offset back... */
Chris Wilson62997222010-09-25 21:32:59 +01001530 ret = drmIoctl(bufmgr_gem->fd,
1531 DRM_IOCTL_I915_GEM_MMAP_GTT,
1532 &mmap_arg);
Eric Anholtd70d6052009-10-06 12:40:42 -07001533 if (ret != 0) {
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001534 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001535 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1536 __FILE__, __LINE__,
1537 bo_gem->gem_handle, bo_gem->name,
1538 strerror(errno));
Chris Wilsonc5f0ed12011-12-13 10:30:54 +00001539 if (--bo_gem->map_count == 0)
1540 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Eric Anholtd70d6052009-10-06 12:40:42 -07001541 return ret;
1542 }
1543
1544 /* and mmap it */
Emil Velikov537b1ca2014-09-07 19:47:06 +01001545 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1546 MAP_SHARED, bufmgr_gem->fd,
1547 mmap_arg.offset);
Eric Anholtd70d6052009-10-06 12:40:42 -07001548 if (bo_gem->gtt_virtual == MAP_FAILED) {
Chris Wilson08371bc2009-12-08 22:35:24 +00001549 bo_gem->gtt_virtual = NULL;
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001550 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001551 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1552 __FILE__, __LINE__,
1553 bo_gem->gem_handle, bo_gem->name,
1554 strerror(errno));
Chris Wilsone4b60f22011-12-05 21:29:05 +00001555 if (--bo_gem->map_count == 0)
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001556 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001557 return ret;
Eric Anholtd70d6052009-10-06 12:40:42 -07001558 }
1559 }
1560
1561 bo->virtual = bo_gem->gtt_virtual;
1562
1563 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1564 bo_gem->gtt_virtual);
1565
Eric Anholt99c73372012-02-10 04:12:15 -08001566 return 0;
1567}
1568
Emil Velikov0f8da822015-03-31 22:32:11 +01001569int
Maarten Lankhorst07fead42014-07-31 15:07:27 +02001570drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
Eric Anholt99c73372012-02-10 04:12:15 -08001571{
1572 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1573 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1574 struct drm_i915_gem_set_domain set_domain;
1575 int ret;
1576
1577 pthread_mutex_lock(&bufmgr_gem->lock);
1578
1579 ret = map_gtt(bo);
1580 if (ret) {
1581 pthread_mutex_unlock(&bufmgr_gem->lock);
1582 return ret;
1583 }
1584
1585 /* Now move it to the GTT domain so that the GPU and CPU
1586 * caches are flushed and the GPU isn't actively using the
1587 * buffer.
1588 *
1589 * The pagefault handler does this domain change for us when
1590 * it has unbound the BO from the GTT, but it's up to us to
1591 * tell it when we're about to use things if we had done
1592 * rendering and it still happens to be bound to the GTT.
1593 */
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001594 memclear(set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001595 set_domain.handle = bo_gem->gem_handle;
1596 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1597 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
Chris Wilson62997222010-09-25 21:32:59 +01001598 ret = drmIoctl(bufmgr_gem->fd,
1599 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1600 &set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001601 if (ret != 0) {
Chris Wilson96214862010-10-01 16:50:09 +01001602 DBG("%s:%d: Error setting domain %d: %s\n",
1603 __FILE__, __LINE__, bo_gem->gem_handle,
1604 strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001605 }
1606
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001607 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1608 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
Chris Wilsona3305b02010-05-13 08:24:28 +01001609 pthread_mutex_unlock(&bufmgr_gem->lock);
1610
Chris Wilsonc3ddfea2010-06-29 20:12:44 +01001611 return 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07001612}
1613
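/*
 * Illustrative sketch: GTT mapping as it might be used for a tiled or
 * scanout buffer, where CPU access goes through the aperture so the
 * hardware detiles on the fly.  "tiled_bo" and "pixels" are hypothetical.
 *
 *	if (drm_intel_gem_bo_map_gtt(tiled_bo) == 0) {
 *		// Linear view through the aperture; the mapping is
 *		// write-combined, so avoid reading back through it.
 *		memcpy(tiled_bo->virtual, pixels, tiled_bo->size);
 *		drm_intel_gem_bo_unmap_gtt(tiled_bo);
 *	}
 */
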
Eric Anholt99c73372012-02-10 04:12:15 -08001614/**
1615 * Performs a mapping of the buffer object like the normal GTT
1616 * mapping, but avoids waiting for the GPU to be done reading from or
1617 * rendering to the buffer.
1618 *
1619 * This is used in the implementation of GL_ARB_map_buffer_range: The
1620 * user asks to create a buffer, then does a mapping, fills some
1621 * space, runs a drawing command, then asks to map it again without
1622 * synchronizing because it guarantees that it won't write over the
1623 * data that the GPU is busy using (or, more specifically, that if it
1624 * does write over the data, it acknowledges that rendering is
1625 * undefined).
1626 */
1627
Emil Velikov0f8da822015-03-31 22:32:11 +01001628int
Maarten Lankhorst07fead42014-07-31 15:07:27 +02001629drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
Eric Anholt99c73372012-02-10 04:12:15 -08001630{
1631 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
Ben Widawsky743372e2013-12-26 16:30:09 -08001632#ifdef HAVE_VALGRIND
Chia-I Wufea54082013-07-10 10:49:59 +08001633 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Ben Widawsky743372e2013-12-26 16:30:09 -08001634#endif
Eric Anholt99c73372012-02-10 04:12:15 -08001635 int ret;
1636
1637 /* If the CPU cache isn't coherent with the GTT, then use a
1638 * regular synchronized mapping. The problem is that we don't
1639 * track where the buffer was last used on the CPU side in
1640 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1641 * we would potentially corrupt the buffer even when the user
1642 * does reasonable things.
1643 */
1644 if (!bufmgr_gem->has_llc)
1645 return drm_intel_gem_bo_map_gtt(bo);
1646
1647 pthread_mutex_lock(&bufmgr_gem->lock);
Chia-I Wufea54082013-07-10 10:49:59 +08001648
Eric Anholt99c73372012-02-10 04:12:15 -08001649 ret = map_gtt(bo);
Chia-I Wufea54082013-07-10 10:49:59 +08001650 if (ret == 0) {
1651 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1652 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1653 }
1654
Eric Anholt99c73372012-02-10 04:12:15 -08001655 pthread_mutex_unlock(&bufmgr_gem->lock);
1656
1657 return ret;
1658}
1659
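/*
 * Illustrative sketch: how a GL_ARB_map_buffer_range-style streaming
 * allocator might use the unsynchronized map, relying on the caller's
 * promise that it only writes ranges the GPU is not using.  "stream_bo",
 * "used_bytes", "data" and "len" are hypothetical.
 *
 *	if (drm_intel_gem_bo_map_unsynchronized(stream_bo) == 0) {
 *		memcpy((char *)stream_bo->virtual + used_bytes, data, len);
 *		drm_intel_gem_bo_unmap_gtt(stream_bo);
 *	}
 */
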
Eric Anholtd70d6052009-10-06 12:40:42 -07001660static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1661{
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01001662 drm_intel_bufmgr_gem *bufmgr_gem;
Eric Anholtd70d6052009-10-06 12:40:42 -07001663 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001664 int ret = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07001665
1666 if (bo == NULL)
1667 return 0;
1668
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01001669 if (bo_gem->is_userptr)
1670 return 0;
1671
1672 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1673
Chris Wilsona3305b02010-05-13 08:24:28 +01001674 pthread_mutex_lock(&bufmgr_gem->lock);
1675
Chris Wilson015286f2011-12-11 17:35:06 +00001676 if (bo_gem->map_count <= 0) {
1677 DBG("attempted to unmap an unmapped bo\n");
1678 pthread_mutex_unlock(&bufmgr_gem->lock);
1679 /* Preserve the old behaviour of just treating this as a
1680 * no-op rather than reporting the error.
1681 */
1682 return 0;
1683 }
Chris Wilsone4b60f22011-12-05 21:29:05 +00001684
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001685 if (bo_gem->mapped_cpu_write) {
Chris Wilson90b23cc2012-02-09 10:23:10 +00001686 struct drm_i915_gem_sw_finish sw_finish;
1687
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001688 /* Cause a flush to happen if the buffer's pinned for
1689 * scanout, so the results show up in a timely manner.
1690 * Unlike GTT set domains, this only does work if the
1691 * buffer should be scanout-related.
1692 */
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001693 memclear(sw_finish);
Eric Anholt4cb01ee2011-10-28 13:12:16 -07001694 sw_finish.handle = bo_gem->gem_handle;
1695 ret = drmIoctl(bufmgr_gem->fd,
1696 DRM_IOCTL_I915_GEM_SW_FINISH,
1697 &sw_finish);
1698 ret = ret == -1 ? -errno : 0;
1699
1700 bo_gem->mapped_cpu_write = false;
1701 }
Eric Anholtd70d6052009-10-06 12:40:42 -07001702
Chris Wilsonc549a772011-12-05 10:14:34 +00001703	/* We need to unmap after every invocation as we cannot track
Grazvydas Ignotas1924b672016-11-20 20:25:46 +02001704 * an open vma for every bo as that will exhaust the system
Chris Wilsonc549a772011-12-05 10:14:34 +00001705 * limits and cause later failures.
1706 */
1707 if (--bo_gem->map_count == 0) {
Chris Wilsondd9a5b42011-12-06 13:12:37 +00001708 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
Chris Wilson23eeb7e2012-02-09 10:29:22 +00001709 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
Chris Wilsonc549a772011-12-05 10:14:34 +00001710 bo->virtual = NULL;
1711 }
Chris Wilsona3305b02010-05-13 08:24:28 +01001712 pthread_mutex_unlock(&bufmgr_gem->lock);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001713
1714 return ret;
Carl Worthafd245d2009-04-29 14:43:55 -07001715}
1716
Emil Velikov0f8da822015-03-31 22:32:11 +01001717int
Maarten Lankhorst07fead42014-07-31 15:07:27 +02001718drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
Eric Anholtd0ae6832011-10-28 13:13:08 -07001719{
1720 return drm_intel_gem_bo_unmap(bo);
1721}
1722
Eric Anholt6a9eb082008-06-03 09:27:37 -07001723static int
Eric Anholtd70d6052009-10-06 12:40:42 -07001724drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1725 unsigned long size, const void *data)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001726{
Eric Anholtd70d6052009-10-06 12:40:42 -07001727 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1728 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1729 struct drm_i915_gem_pwrite pwrite;
1730 int ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001731
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01001732 if (bo_gem->is_userptr)
1733 return -EINVAL;
1734
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001735 memclear(pwrite);
Eric Anholtd70d6052009-10-06 12:40:42 -07001736 pwrite.handle = bo_gem->gem_handle;
1737 pwrite.offset = offset;
1738 pwrite.size = size;
1739 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
Chris Wilson62997222010-09-25 21:32:59 +01001740 ret = drmIoctl(bufmgr_gem->fd,
1741 DRM_IOCTL_I915_GEM_PWRITE,
1742 &pwrite);
Eric Anholtd70d6052009-10-06 12:40:42 -07001743 if (ret != 0) {
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001744 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001745 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1746 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1747 (int)size, strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001748 }
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001749
1750 return ret;
Eric Anholtd70d6052009-10-06 12:40:42 -07001751}
1752
1753static int
1754drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1755{
1756 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1757 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1758 int ret;
1759
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001760 memclear(get_pipe_from_crtc_id);
Eric Anholtd70d6052009-10-06 12:40:42 -07001761 get_pipe_from_crtc_id.crtc_id = crtc_id;
Chris Wilson62997222010-09-25 21:32:59 +01001762 ret = drmIoctl(bufmgr_gem->fd,
1763 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1764 &get_pipe_from_crtc_id);
Eric Anholtd70d6052009-10-06 12:40:42 -07001765 if (ret != 0) {
1766 /* We return -1 here to signal that we don't
1767 * know which pipe is associated with this crtc.
1768 * This lets the caller know that this information
1769 * isn't available; using the wrong pipe for
1770 * vblank waiting can cause the chipset to lock up
1771 */
1772 return -1;
1773 }
1774
1775 return get_pipe_from_crtc_id.pipe;
1776}
1777
1778static int
1779drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1780 unsigned long size, void *data)
1781{
1782 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1783 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1784 struct drm_i915_gem_pread pread;
1785 int ret;
1786
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01001787 if (bo_gem->is_userptr)
1788 return -EINVAL;
1789
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001790 memclear(pread);
Eric Anholtd70d6052009-10-06 12:40:42 -07001791 pread.handle = bo_gem->gem_handle;
1792 pread.offset = offset;
1793 pread.size = size;
1794 pread.data_ptr = (uint64_t) (uintptr_t) data;
Chris Wilson62997222010-09-25 21:32:59 +01001795 ret = drmIoctl(bufmgr_gem->fd,
1796 DRM_IOCTL_I915_GEM_PREAD,
1797 &pread);
Eric Anholtd70d6052009-10-06 12:40:42 -07001798 if (ret != 0) {
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001799 ret = -errno;
Chris Wilson96214862010-10-01 16:50:09 +01001800 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1801 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1802 (int)size, strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001803 }
Chris Wilson3e21e3b2010-03-04 21:17:48 +00001804
Chris Wilsonacb4aa62009-12-02 12:40:26 +00001805 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001806}
1807
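/*
 * Illustrative sketch: pwrite/pread style access through the public
 * wrappers, which avoids mapping entirely and is often the simplest path
 * for small, one-shot transfers.  The offset and payload are hypothetical.
 *
 *	uint32_t magic = 0xdeadbeef, check = 0;
 *	drm_intel_bo_subdata(bo, 0, sizeof(magic), &magic);
 *	drm_intel_bo_get_subdata(bo, 0, sizeof(check), &check);
 *	assert(check == magic);
 */
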
Eric Anholt877b2ce2010-11-09 13:51:45 -08001808/** Waits for all GPU rendering with the object to have completed. */
Eric Anholt6a9eb082008-06-03 09:27:37 -07001809static void
Eric Anholt4b982642008-10-30 09:33:07 -07001810drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001811{
Eric Anholt877b2ce2010-11-09 13:51:45 -08001812 drm_intel_gem_bo_start_gtt_access(bo, 1);
Eric Anholt6fb1ad72008-11-13 11:44:22 -08001813}
1814
1815/**
Ben Widawsky971c0802012-06-05 11:30:48 -07001816 * Waits on a BO for the given amount of time.
1817 *
1818 * @bo: buffer object to wait for
1819 * @timeout_ns: amount of time to wait in nanoseconds.
Daniel Vetterfcff9e22015-03-06 18:56:57 +01001820 * If value is less than 0, an infinite wait will occur.
Ben Widawsky971c0802012-06-05 11:30:48 -07001821 *
Daniel Vetterfcff9e22015-03-06 18:56:57 +01001822 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1823 * object has completed within the allotted time. Otherwise a negative return
1824 * value describes the error. Of particular interest is -ETIME when the wait has
1825 * failed to yield the desired result.
Ben Widawsky971c0802012-06-05 11:30:48 -07001826 *
1827 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1828 * the operation to give up after a certain amount of time. Another subtle
1829 * difference is in the internal locking semantics (this variant does
1830 * not hold the lock for the duration of the wait). This makes the wait subject
1831 * to a larger userspace race window.
1832 *
1833 * The implementation shall wait until the object is no longer actively
1834 * referenced within a batch buffer at the time of the call. The wait will
1835 * not guard against the buffer being re-issued via another thread, or a flinked
1836 * handle. Userspace must make sure this race does not occur if such precision
1837 * is important.
Daniel Vetterfcff9e22015-03-06 18:56:57 +01001838 *
1839 * Note that some kernels have broken the promise of an infinite wait for
1840 * negative values; upgrade to the latest stable kernel if this is the case.
Ben Widawsky971c0802012-06-05 11:30:48 -07001841 */
Emil Velikov0f8da822015-03-31 22:32:11 +01001842int
Maarten Lankhorst07fead42014-07-31 15:07:27 +02001843drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
Ben Widawsky971c0802012-06-05 11:30:48 -07001844{
1845 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1846 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1847 struct drm_i915_gem_wait wait;
1848 int ret;
1849
1850 if (!bufmgr_gem->has_wait_timeout) {
1851 DBG("%s:%d: Timed wait is not supported. Falling back to "
1852 "infinite wait\n", __FILE__, __LINE__);
1853 if (timeout_ns) {
1854 drm_intel_gem_bo_wait_rendering(bo);
1855 return 0;
1856 } else {
1857 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1858 }
1859 }
1860
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001861 memclear(wait);
Ben Widawsky971c0802012-06-05 11:30:48 -07001862 wait.bo_handle = bo_gem->gem_handle;
1863 wait.timeout_ns = timeout_ns;
Ben Widawsky971c0802012-06-05 11:30:48 -07001864 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1865 if (ret == -1)
1866 return -errno;
1867
1868 return ret;
1869}
1870
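/*
 * Illustrative sketch: a bounded wait with a fallback, as a caller might use
 * drm_intel_gem_bo_wait() to avoid stalling on a readback.  The one
 * millisecond budget and "batch_bo" are hypothetical.
 *
 *	int err = drm_intel_gem_bo_wait(batch_bo, 1000 * 1000);	// 1 ms
 *	if (err == -ETIME) {
 *		// Still busy: skip the readback this frame rather than stall.
 *	} else if (err == 0) {
 *		// Rendering complete; safe to map without blocking.
 *	}
 */
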
1871/**
Eric Anholt6fb1ad72008-11-13 11:44:22 -08001872 * Sets the object to the GTT read and possibly write domain, used by the X
1873 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1874 *
1875 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1876 * can do tiled pixmaps this way.
1877 */
Emil Velikov0f8da822015-03-31 22:32:11 +01001878void
Eric Anholt6fb1ad72008-11-13 11:44:22 -08001879drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1880{
Eric Anholtd70d6052009-10-06 12:40:42 -07001881 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1882 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1883 struct drm_i915_gem_set_domain set_domain;
1884 int ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001885
Daniel Vettereb7a5b62015-02-11 11:59:52 +01001886 memclear(set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001887 set_domain.handle = bo_gem->gem_handle;
1888 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1889 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
Chris Wilson62997222010-09-25 21:32:59 +01001890 ret = drmIoctl(bufmgr_gem->fd,
1891 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1892 &set_domain);
Eric Anholtd70d6052009-10-06 12:40:42 -07001893 if (ret != 0) {
Chris Wilson96214862010-10-01 16:50:09 +01001894 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1895 __FILE__, __LINE__, bo_gem->gem_handle,
1896 set_domain.read_domains, set_domain.write_domain,
1897 strerror(errno));
Eric Anholtd70d6052009-10-06 12:40:42 -07001898 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001899}
1900
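/*
 * Illustrative sketch: the 2D-driver pattern described above, assuming a
 * pinned, tiled scanout buffer written through a GTT map managed by the
 * caller.  "front_bo" is hypothetical.
 *
 *	drm_intel_gem_bo_start_gtt_access(front_bo, 1);	// about to write
 *	// ... software rendering into the caller's GTT mapping ...
 */
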
1901static void
Eric Anholt4b982642008-10-30 09:33:07 -07001902drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001903{
Eric Anholtd70d6052009-10-06 12:40:42 -07001904 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
Tvrtko Ursulin30921482015-04-17 11:57:28 +01001905 struct drm_gem_close close_bo;
1906 int i, ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001907
Jesse Barnesb5096402009-09-15 11:02:58 -07001908 free(bufmgr_gem->exec2_objects);
Eric Anholtd70d6052009-10-06 12:40:42 -07001909 free(bufmgr_gem->exec_objects);
1910 free(bufmgr_gem->exec_bos);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001911
Eric Anholtd70d6052009-10-06 12:40:42 -07001912 pthread_mutex_destroy(&bufmgr_gem->lock);
Eric Anholt6df7b072008-06-12 23:22:26 -07001913
Eric Anholtd70d6052009-10-06 12:40:42 -07001914 /* Free any cached buffer objects we were going to reuse */
Eric Anholt0ec768e2010-06-04 17:09:11 -07001915 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
Eric Anholtd70d6052009-10-06 12:40:42 -07001916 struct drm_intel_gem_bo_bucket *bucket =
1917 &bufmgr_gem->cache_bucket[i];
1918 drm_intel_bo_gem *bo_gem;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001919
Eric Anholtd70d6052009-10-06 12:40:42 -07001920 while (!DRMLISTEMPTY(&bucket->head)) {
1921 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1922 bucket->head.next, head);
1923 DRMLISTDEL(&bo_gem->head);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001924
Eric Anholtd70d6052009-10-06 12:40:42 -07001925 drm_intel_gem_bo_free(&bo_gem->bo);
1926 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001927 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001928
Tvrtko Ursulin30921482015-04-17 11:57:28 +01001929 /* Release userptr bo kept hanging around for optimisation. */
1930 if (bufmgr_gem->userptr_active.ptr) {
1931 memclear(close_bo);
1932 close_bo.handle = bufmgr_gem->userptr_active.handle;
1933 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1934 free(bufmgr_gem->userptr_active.ptr);
1935 if (ret)
1936 fprintf(stderr,
1937 "Failed to release test userptr object! (%d) "
1938 "i915 kernel driver may not be sane!\n", errno);
1939 }
1940
Eric Anholtd70d6052009-10-06 12:40:42 -07001941 free(bufmgr);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001942}
1943
1944/**
1945 * Adds the target buffer to the validation list and adds the relocation
1946 * to the reloc_buffer's relocation list.
1947 *
1948 * The relocation entry at the given offset must already contain the
1949 * precomputed relocation value, because the kernel will optimize out
1950 * the relocation entry write when the buffer hasn't moved from the
1951 * last known offset in target_bo.
1952 */
1953static int
Jesse Barnesb5096402009-09-15 11:02:58 -07001954do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1955 drm_intel_bo *target_bo, uint32_t target_offset,
1956 uint32_t read_domains, uint32_t write_domain,
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001957 bool need_fence)
Eric Anholt6a9eb082008-06-03 09:27:37 -07001958{
Eric Anholtd70d6052009-10-06 12:40:42 -07001959 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1960 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1961 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001962 bool fenced_command;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001963
Chris Wilson97077332009-12-01 23:01:34 +00001964 if (bo_gem->has_error)
Chris Wilson792fed12009-12-02 13:12:39 +00001965 return -ENOMEM;
Chris Wilson792fed12009-12-02 13:12:39 +00001966
1967 if (target_bo_gem->has_error) {
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001968 bo_gem->has_error = true;
Chris Wilson792fed12009-12-02 13:12:39 +00001969 return -ENOMEM;
1970 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07001971
Jesse Barnesb5096402009-09-15 11:02:58 -07001972 /* We never use HW fences for rendering on 965+ */
Eric Anholta1f9ea72010-03-02 08:49:36 -08001973 if (bufmgr_gem->gen >= 4)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001974 need_fence = false;
Jesse Barnesb5096402009-09-15 11:02:58 -07001975
Chris Wilson537703f2010-12-07 20:34:22 +00001976 fenced_command = need_fence;
1977 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07001978 need_fence = false;
Chris Wilson537703f2010-12-07 20:34:22 +00001979
Eric Anholtd70d6052009-10-06 12:40:42 -07001980 /* Create a new relocation list if needed */
Chris Wilson97077332009-12-01 23:01:34 +00001981 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
Chris Wilson792fed12009-12-02 13:12:39 +00001982 return -ENOMEM;
Eric Anholt6a9eb082008-06-03 09:27:37 -07001983
Eric Anholtd70d6052009-10-06 12:40:42 -07001984 /* Check overflow */
1985 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001986
Eric Anholtd70d6052009-10-06 12:40:42 -07001987 /* Check args */
1988 assert(offset <= bo->size - 4);
1989 assert((write_domain & (write_domain - 1)) == 0);
Eric Anholt6a9eb082008-06-03 09:27:37 -07001990
Chris Wilsonec65f8d2013-05-08 16:30:44 +01001991 /* An object needing a fence is a tiled buffer, so it won't have
1992 * relocs to other buffers.
1993 */
1994 if (need_fence) {
1995 assert(target_bo_gem->reloc_count == 0);
1996 target_bo_gem->reloc_tree_fences = 1;
1997 }
1998
Eric Anholtd70d6052009-10-06 12:40:42 -07001999 /* Make sure that we're not adding a reloc to something whose size has
2000 * already been accounted for.
2001 */
2002 assert(!bo_gem->used_as_reloc_target);
Eric Anholtf1791372010-06-07 14:22:36 -07002003 if (target_bo_gem != bo_gem) {
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002004 target_bo_gem->used_as_reloc_target = true;
Eric Anholtf1791372010-06-07 14:22:36 -07002005 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
Chris Wilsonec65f8d2013-05-08 16:30:44 +01002006 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
Eric Anholtf1791372010-06-07 14:22:36 -07002007 }
Eric Anholt0e867312008-10-21 00:10:54 -07002008
Jesse Barnesb5096402009-09-15 11:02:58 -07002009 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
Eric Anholt4f7704a2010-06-10 08:58:08 -07002010 if (target_bo != bo)
2011 drm_intel_gem_bo_reference(target_bo);
Chris Wilsonaf3d2822010-12-03 10:48:12 +00002012 if (fenced_command)
Jesse Barnesb5096402009-09-15 11:02:58 -07002013 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
2014 DRM_INTEL_RELOC_FENCE;
2015 else
2016 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002017
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002018 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2019 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2020 bo_gem->relocs[bo_gem->reloc_count].target_handle =
2021 target_bo_gem->gem_handle;
2022 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2023 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2024 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
Eric Anholtd70d6052009-10-06 12:40:42 -07002025 bo_gem->reloc_count++;
Eric Anholt6df7b072008-06-12 23:22:26 -07002026
Eric Anholtd70d6052009-10-06 12:40:42 -07002027 return 0;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002028}
2029
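/*
 * Illustrative sketch: emitting a relocation through the public wrapper,
 * which lands in do_bo_emit_reloc() above.  Here a hypothetical render
 * target pointer is written at byte offset "reloc_offset" in the batch.
 *
 *	drm_intel_bo_emit_reloc(batch_bo, reloc_offset,
 *				target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 *	// The batch must already contain target_bo->offset64 + 0 at
 *	// reloc_offset so the kernel can skip the rewrite when nothing moved.
 */
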
Michel Thierry3350add2015-09-03 15:23:58 +01002030static void
2031drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2032{
2033 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Chris Wilsonec80fd32017-02-11 11:04:50 +00002034
2035 if (enable)
2036 bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2037 else
2038 bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
Michel Thierry3350add2015-09-03 15:23:58 +01002039}
2040
Jesse Barnesb5096402009-09-15 11:02:58 -07002041static int
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002042drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2043{
2044 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2045 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2046 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2047 if (bo_gem->has_error)
2048 return -ENOMEM;
2049
2050 if (target_bo_gem->has_error) {
2051 bo_gem->has_error = true;
2052 return -ENOMEM;
2053 }
2054
Chris Wilsone0f05b22017-01-28 16:32:23 +00002055 if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002056 return -EINVAL;
2057 if (target_bo_gem == bo_gem)
2058 return -EINVAL;
2059
2060 if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2061 int new_size = bo_gem->softpin_target_size * 2;
2062 if (new_size == 0)
2063 new_size = bufmgr_gem->max_relocs;
2064
2065 bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
2066 sizeof(drm_intel_bo *));
2067 if (!bo_gem->softpin_target)
2068 return -ENOMEM;
2069
2070 bo_gem->softpin_target_size = new_size;
2071 }
2072 bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2073 drm_intel_gem_bo_reference(target_bo);
2074 bo_gem->softpin_target_count++;
2075
2076 return 0;
2077}
2078
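/*
 * Illustrative sketch: with softpin, the caller fixes the GPU address up
 * front and later "relocations" to that buffer become pure dependency
 * tracking via drm_intel_gem_bo_add_softpin_target() above.  The address is
 * hypothetical and must come from the caller's own VMA allocator.
 *
 *	drm_intel_bo_set_softpin_offset(target_bo, 0x100000000ull);
 *	drm_intel_bo_use_48b_address_range(target_bo, 1);
 *	drm_intel_bo_emit_reloc(batch_bo, reloc_offset, target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
 */
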
2079static int
Jesse Barnesb5096402009-09-15 11:02:58 -07002080drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
2081 drm_intel_bo *target_bo, uint32_t target_offset,
2082 uint32_t read_domains, uint32_t write_domain)
2083{
2084 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002085 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
Jesse Barnesb5096402009-09-15 11:02:58 -07002086
Chris Wilsone0f05b22017-01-28 16:32:23 +00002087 if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002088 return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2089 else
2090 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2091 read_domains, write_domain,
2092 !bufmgr_gem->fenced_relocs);
Jesse Barnesb5096402009-09-15 11:02:58 -07002093}
2094
2095static int
2096drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
2097 drm_intel_bo *target_bo,
2098 uint32_t target_offset,
2099 uint32_t read_domains, uint32_t write_domain)
2100{
2101 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002102 read_domains, write_domain, true);
Jesse Barnesb5096402009-09-15 11:02:58 -07002103}
2104
Emil Velikov0f8da822015-03-31 22:32:11 +01002105int
Eric Anholt515cea62011-10-21 18:48:20 -07002106drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
2107{
2108 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2109
2110 return bo_gem->reloc_count;
2111}
2112
2113/**
2114 * Removes existing relocation entries in the BO after "start".
2115 *
2116 * This allows a user to avoid a two-step process for state setup with
2117 * counting up all the buffer objects and doing a
2118 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
2119 * relocations for the state setup. Instead, save the state of the
2120 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
2121 * state, and then check if it still fits in the aperture.
2122 *
2123 * Any further drm_intel_bufmgr_check_aperture_space() queries
2124 * involving this buffer in the tree are undefined after this call.
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002125 *
2126 * This also removes all softpinned targets being referenced by the BO.
Eric Anholt515cea62011-10-21 18:48:20 -07002127 */
Emil Velikov0f8da822015-03-31 22:32:11 +01002128void
Eric Anholt515cea62011-10-21 18:48:20 -07002129drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
2130{
Lionel Landwerlin86b37c62014-09-12 13:48:38 +01002131 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
Eric Anholt515cea62011-10-21 18:48:20 -07002132 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2133 int i;
2134 struct timespec time;
2135
2136 clock_gettime(CLOCK_MONOTONIC, &time);
2137
2138 assert(bo_gem->reloc_count >= start);
Lionel Landwerlin86b37c62014-09-12 13:48:38 +01002139
Eric Anholt515cea62011-10-21 18:48:20 -07002140 /* Unreference the cleared target buffers */
Lionel Landwerlin86b37c62014-09-12 13:48:38 +01002141 pthread_mutex_lock(&bufmgr_gem->lock);
2142
Eric Anholt515cea62011-10-21 18:48:20 -07002143 for (i = start; i < bo_gem->reloc_count; i++) {
Chris Wilsonfdda9702013-01-11 00:55:12 +00002144 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2145 if (&target_bo_gem->bo != bo) {
2146 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2147 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
Eric Anholt515cea62011-10-21 18:48:20 -07002148 time.tv_sec);
2149 }
2150 }
2151 bo_gem->reloc_count = start;
Lionel Landwerlin86b37c62014-09-12 13:48:38 +01002152
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002153 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2154 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2155 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2156 }
2157 bo_gem->softpin_target_count = 0;
2158
Lionel Landwerlin86b37c62014-09-12 13:48:38 +01002159 pthread_mutex_unlock(&bufmgr_gem->lock);
2160
Eric Anholt515cea62011-10-21 18:48:20 -07002161}
2162
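/*
 * Illustrative sketch: the single-pass state setup described above, using a
 * saved relocation count to back out if the aperture check fails.  The
 * emit_state() step is hypothetical.
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);	// adds relocations to batch_bo
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 *		// flush the batch, then re-emit the state
 *	}
 */
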
Eric Anholt6a9eb082008-06-03 09:27:37 -07002163/**
2164 * Walk the tree of relocations rooted at BO and accumulate the list of
2165 * validations to be performed and update the relocation buffers with
2166 * index values into the validation list.
2167 */
2168static void
Eric Anholt4b982642008-10-30 09:33:07 -07002169drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002170{
Eric Anholtd70d6052009-10-06 12:40:42 -07002171 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2172 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002173
Eric Anholtd70d6052009-10-06 12:40:42 -07002174 if (bo_gem->relocs == NULL)
2175 return;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002176
Eric Anholtd70d6052009-10-06 12:40:42 -07002177 for (i = 0; i < bo_gem->reloc_count; i++) {
Jesse Barnesb5096402009-09-15 11:02:58 -07002178 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002179
Eric Anholtf1791372010-06-07 14:22:36 -07002180 if (target_bo == bo)
2181 continue;
2182
Chris Wilson23eeb7e2012-02-09 10:29:22 +00002183 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2184
Eric Anholtd70d6052009-10-06 12:40:42 -07002185 /* Continue walking the tree depth-first. */
2186 drm_intel_gem_bo_process_reloc(target_bo);
Eric Anholt6a9eb082008-06-03 09:27:37 -07002187
Eric Anholtd70d6052009-10-06 12:40:42 -07002188 /* Add the target to the validate list */
2189 drm_intel_add_validate_buffer(target_bo);
2190 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07002191}
2192
Eric Anholt6a9eb082008-06-03 09:27:37 -07002193static void
Jesse Barnesb5096402009-09-15 11:02:58 -07002194drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2195{
2196 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2197 int i;
2198
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002199 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
Jesse Barnesb5096402009-09-15 11:02:58 -07002200 return;
2201
2202 for (i = 0; i < bo_gem->reloc_count; i++) {
2203 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2204 int need_fence;
2205
Eric Anholtf1791372010-06-07 14:22:36 -07002206 if (target_bo == bo)
2207 continue;
2208
Chris Wilson23eeb7e2012-02-09 10:29:22 +00002209 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2210
Jesse Barnesb5096402009-09-15 11:02:58 -07002211 /* Continue walking the tree depth-first. */
2212 drm_intel_gem_bo_process_reloc2(target_bo);
2213
2214 need_fence = (bo_gem->reloc_target_info[i].flags &
2215 DRM_INTEL_RELOC_FENCE);
2216
2217 /* Add the target to the validate list */
2218 drm_intel_add_validate_buffer2(target_bo, need_fence);
2219 }
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002220
2221 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2222 drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2223
2224 if (target_bo == bo)
2225 continue;
2226
2227 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2228 drm_intel_gem_bo_process_reloc2(target_bo);
2229 drm_intel_add_validate_buffer2(target_bo, false);
2230 }
Jesse Barnesb5096402009-09-15 11:02:58 -07002231}
2232
2233
2234static void
Eric Anholtd70d6052009-10-06 12:40:42 -07002235drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002236{
Eric Anholtd70d6052009-10-06 12:40:42 -07002237 int i;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002238
Eric Anholtd70d6052009-10-06 12:40:42 -07002239 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2240 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2241 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002242
Eric Anholtd70d6052009-10-06 12:40:42 -07002243 /* Update the buffer offset */
Kenneth Graunkeedf17db2014-01-13 14:14:36 -08002244 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
Michel Thierry3350add2015-09-03 15:23:58 +01002245 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2246 bo_gem->gem_handle, bo_gem->name,
2247 upper_32_bits(bo->offset64),
2248 lower_32_bits(bo->offset64),
2249 upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2250 lower_32_bits(bufmgr_gem->exec_objects[i].offset));
Kenneth Graunkeedf17db2014-01-13 14:14:36 -08002251 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
Eric Anholtd70d6052009-10-06 12:40:42 -07002252 bo->offset = bufmgr_gem->exec_objects[i].offset;
2253 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07002254 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07002255}
2256
Jesse Barnesb5096402009-09-15 11:02:58 -07002257static void
2258drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2259{
2260 int i;
2261
2262 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2263 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2264 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2265
2266 /* Update the buffer offset */
Kenneth Graunkeedf17db2014-01-13 14:14:36 -08002267 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002268			/* If we're seeing a softpinned object here it means that the kernel
2269			 * has relocated our object, which indicates a programming error.
2270 */
Chris Wilsone0f05b22017-01-28 16:32:23 +00002271 assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
Michał Winiarskib38a4b22015-12-15 16:28:55 +01002272 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
Michel Thierry3350add2015-09-03 15:23:58 +01002273 bo_gem->gem_handle, bo_gem->name,
Michał Winiarskib38a4b22015-12-15 16:28:55 +01002274 upper_32_bits(bo->offset64),
2275 lower_32_bits(bo->offset64),
2276 upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2277 lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
Kenneth Graunkeedf17db2014-01-13 14:14:36 -08002278 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
Jesse Barnesb5096402009-09-15 11:02:58 -07002279 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2280 }
2281 }
2282}
2283
Emil Velikov0f8da822015-03-31 22:32:11 +01002284void
Eric Anholt4db16a92011-10-11 15:59:03 -07002285drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2286 int x1, int y1, int width, int height,
2287 enum aub_dump_bmp_format format,
2288 int pitch, int offset)
2289{
Eric Anholt4db16a92011-10-11 15:59:03 -07002290}
2291
Eric Anholtf9d98be2008-09-08 08:51:40 -07002292static int
Eric Anholt4b982642008-10-30 09:33:07 -07002293drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
Eric Anholtd70d6052009-10-06 12:40:42 -07002294 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002295{
Eric Anholtd70d6052009-10-06 12:40:42 -07002296 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2297 struct drm_i915_gem_execbuffer execbuf;
2298 int ret, i;
Eric Anholtf9d98be2008-09-08 08:51:40 -07002299
Emil Velikov0ec7f442015-08-31 20:38:54 +01002300 if (to_bo_gem(bo)->has_error)
Chris Wilson792fed12009-12-02 13:12:39 +00002301 return -ENOMEM;
2302
Eric Anholtd70d6052009-10-06 12:40:42 -07002303 pthread_mutex_lock(&bufmgr_gem->lock);
2304 /* Update indices and set up the validate list. */
2305 drm_intel_gem_bo_process_reloc(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07002306
Eric Anholtd70d6052009-10-06 12:40:42 -07002307 /* Add the batch buffer to the validation list. There are no
2308 * relocations pointing to it.
2309 */
2310 drm_intel_add_validate_buffer(bo);
Eric Anholtf9d98be2008-09-08 08:51:40 -07002311
Daniel Vettereb7a5b62015-02-11 11:59:52 +01002312 memclear(execbuf);
Eric Anholtd70d6052009-10-06 12:40:42 -07002313 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2314 execbuf.buffer_count = bufmgr_gem->exec_count;
2315 execbuf.batch_start_offset = 0;
2316 execbuf.batch_len = used;
2317 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2318 execbuf.num_cliprects = num_cliprects;
2319 execbuf.DR1 = 0;
2320 execbuf.DR4 = DR4;
Eric Anholtf9d98be2008-09-08 08:51:40 -07002321
Chris Wilson62997222010-09-25 21:32:59 +01002322 ret = drmIoctl(bufmgr_gem->fd,
2323 DRM_IOCTL_I915_GEM_EXECBUFFER,
2324 &execbuf);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002325 if (ret != 0) {
2326 ret = -errno;
2327 if (errno == ENOSPC) {
Chris Wilson96214862010-10-01 16:50:09 +01002328 DBG("Execbuffer fails to pin. "
2329 "Estimate: %u. Actual: %u. Available: %u\n",
2330 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2331 bufmgr_gem->
2332 exec_count),
2333 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2334 bufmgr_gem->
2335 exec_count),
2336 (unsigned int)bufmgr_gem->gtt_size);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002337 }
Eric Anholtd70d6052009-10-06 12:40:42 -07002338 }
2339 drm_intel_update_buffer_offsets(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07002340
Eric Anholtd70d6052009-10-06 12:40:42 -07002341 if (bufmgr_gem->bufmgr.debug)
2342 drm_intel_gem_dump_validation_list(bufmgr_gem);
Eric Anholt6a9eb082008-06-03 09:27:37 -07002343
Eric Anholtd70d6052009-10-06 12:40:42 -07002344 for (i = 0; i < bufmgr_gem->exec_count; i++) {
Emil Velikov0ec7f442015-08-31 20:38:54 +01002345 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
Eric Anholt6a9eb082008-06-03 09:27:37 -07002346
Eric Anholt02f93c22014-01-15 00:38:39 -08002347 bo_gem->idle = false;
2348
Eric Anholtd70d6052009-10-06 12:40:42 -07002349 /* Disconnect the buffer from the validate list */
2350 bo_gem->validate_index = -1;
Eric Anholtd70d6052009-10-06 12:40:42 -07002351 bufmgr_gem->exec_bos[i] = NULL;
2352 }
2353 bufmgr_gem->exec_count = 0;
2354 pthread_mutex_unlock(&bufmgr_gem->lock);
Eric Anholtf9d98be2008-09-08 08:51:40 -07002355
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002356 return ret;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002357}
2358
Keith Packard8e41ce12008-08-04 00:34:08 -07002359static int
Ben Widawsky3ed38712012-03-18 18:28:28 -07002360do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2361 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
Chris Wilsonc4b00762016-08-20 12:38:46 +01002362 int in_fence, int *out_fence,
Ben Widawsky3ed38712012-03-18 18:28:28 -07002363 unsigned int flags)
Jesse Barnesb5096402009-09-15 11:02:58 -07002364{
2365 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2366 struct drm_i915_gem_execbuffer2 execbuf;
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002367 int ret = 0;
2368 int i;
Jesse Barnesb5096402009-09-15 11:02:58 -07002369
Emil Velikov0ec7f442015-08-31 20:38:54 +01002370 if (to_bo_gem(bo)->has_error)
Emil Velikov94425f62015-08-15 15:43:46 +01002371 return -ENOMEM;
2372
Chris Wilson0184bb12010-12-19 13:01:15 +00002373 switch (flags & 0x7) {
Chris Wilson057fab32010-10-26 11:35:11 +01002374 default:
Zou Nan hai66375fd2010-06-02 10:07:37 +08002375 return -EINVAL;
Chris Wilson057fab32010-10-26 11:35:11 +01002376 case I915_EXEC_BLT:
2377 if (!bufmgr_gem->has_blt)
2378 return -EINVAL;
2379 break;
2380 case I915_EXEC_BSD:
2381 if (!bufmgr_gem->has_bsd)
2382 return -EINVAL;
2383 break;
Xiang, Haihao01199992012-11-14 12:46:39 +08002384 case I915_EXEC_VEBOX:
2385 if (!bufmgr_gem->has_vebox)
2386 return -EINVAL;
2387 break;
Chris Wilson057fab32010-10-26 11:35:11 +01002388 case I915_EXEC_RENDER:
2389 case I915_EXEC_DEFAULT:
2390 break;
2391 }
Zou Nan hai66375fd2010-06-02 10:07:37 +08002392
Jesse Barnesb5096402009-09-15 11:02:58 -07002393 pthread_mutex_lock(&bufmgr_gem->lock);
2394 /* Update indices and set up the validate list. */
2395 drm_intel_gem_bo_process_reloc2(bo);
2396
2397 /* Add the batch buffer to the validation list. There are no relocations
2398 * pointing to it.
2399 */
2400 drm_intel_add_validate_buffer2(bo, 0);
2401
Daniel Vettereb7a5b62015-02-11 11:59:52 +01002402 memclear(execbuf);
Jesse Barnesb5096402009-09-15 11:02:58 -07002403 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2404 execbuf.buffer_count = bufmgr_gem->exec_count;
2405 execbuf.batch_start_offset = 0;
2406 execbuf.batch_len = used;
2407 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2408 execbuf.num_cliprects = num_cliprects;
2409 execbuf.DR1 = 0;
2410 execbuf.DR4 = DR4;
Chris Wilson0184bb12010-12-19 13:01:15 +00002411 execbuf.flags = flags;
Ben Widawsky3ed38712012-03-18 18:28:28 -07002412 if (ctx == NULL)
2413 i915_execbuffer2_set_context_id(execbuf, 0);
2414 else
2415 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
Jesse Barnesb5096402009-09-15 11:02:58 -07002416 execbuf.rsvd2 = 0;
Chris Wilsonc4b00762016-08-20 12:38:46 +01002417 if (in_fence != -1) {
2418 execbuf.rsvd2 = in_fence;
2419 execbuf.flags |= I915_EXEC_FENCE_IN;
2420 }
2421 if (out_fence != NULL) {
2422 *out_fence = -1;
2423 execbuf.flags |= I915_EXEC_FENCE_OUT;
2424 }
Jesse Barnesb5096402009-09-15 11:02:58 -07002425
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002426 if (bufmgr_gem->no_exec)
2427 goto skip_execution;
2428
Chris Wilson62997222010-09-25 21:32:59 +01002429 ret = drmIoctl(bufmgr_gem->fd,
Chris Wilsonc4b00762016-08-20 12:38:46 +01002430 DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
Chris Wilson62997222010-09-25 21:32:59 +01002431 &execbuf);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00002432 if (ret != 0) {
2433 ret = -errno;
Chris Wilson13e82702010-06-21 15:38:06 +01002434 if (ret == -ENOSPC) {
Chris Wilson96214862010-10-01 16:50:09 +01002435 DBG("Execbuffer fails to pin. "
2436 "Estimate: %u. Actual: %u. Available: %u\n",
2437 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2438 bufmgr_gem->exec_count),
2439 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2440 bufmgr_gem->exec_count),
2441 (unsigned int) bufmgr_gem->gtt_size);
Chris Wilson3e21e3b2010-03-04 21:17:48 +00002442 }
Jesse Barnesb5096402009-09-15 11:02:58 -07002443 }
2444 drm_intel_update_buffer_offsets2(bufmgr_gem);
2445
Chris Wilsonc4b00762016-08-20 12:38:46 +01002446 if (ret == 0 && out_fence != NULL)
2447 *out_fence = execbuf.rsvd2 >> 32;
2448
Kenneth Graunke6e642db2011-10-11 14:38:34 -07002449skip_execution:
Jesse Barnesb5096402009-09-15 11:02:58 -07002450 if (bufmgr_gem->bufmgr.debug)
2451 drm_intel_gem_dump_validation_list(bufmgr_gem);
2452
2453 for (i = 0; i < bufmgr_gem->exec_count; i++) {
Emil Velikov0ec7f442015-08-31 20:38:54 +01002454 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
Jesse Barnesb5096402009-09-15 11:02:58 -07002455
Eric Anholt02f93c22014-01-15 00:38:39 -08002456 bo_gem->idle = false;
2457
Jesse Barnesb5096402009-09-15 11:02:58 -07002458 /* Disconnect the buffer from the validate list */
2459 bo_gem->validate_index = -1;
2460 bufmgr_gem->exec_bos[i] = NULL;
2461 }
2462 bufmgr_gem->exec_count = 0;
2463 pthread_mutex_unlock(&bufmgr_gem->lock);
2464
Chris Wilson3e21e3b2010-03-04 21:17:48 +00002465 return ret;
Jesse Barnesb5096402009-09-15 11:02:58 -07002466}
2467
2468static int
Zou Nan hai66375fd2010-06-02 10:07:37 +08002469drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2470 drm_clip_rect_t *cliprects, int num_cliprects,
2471 int DR4)
2472{
Ben Widawsky3ed38712012-03-18 18:28:28 -07002473 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
Chris Wilsonc4b00762016-08-20 12:38:46 +01002474 -1, NULL, I915_EXEC_RENDER);
Ben Widawsky3ed38712012-03-18 18:28:28 -07002475}
2476
2477static int
2478drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2479 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2480 unsigned int flags)
2481{
2482 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
Chris Wilsonc4b00762016-08-20 12:38:46 +01002483 -1, NULL, flags);
Ben Widawsky3ed38712012-03-18 18:28:28 -07002484}
2485
Emil Velikov0f8da822015-03-31 22:32:11 +01002486int
Ben Widawsky3ed38712012-03-18 18:28:28 -07002487drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2488 int used, unsigned int flags)
2489{
Chris Wilsonc4b00762016-08-20 12:38:46 +01002490 return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
2491}
2492
2493int
2494drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
2495 drm_intel_context *ctx,
2496 int used,
2497 int in_fence,
2498 int *out_fence,
2499 unsigned int flags)
2500{
2501 return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
Zou Nan hai66375fd2010-06-02 10:07:37 +08002502}
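/*
 * Usage sketch (hypothetical caller code, not part of this file): the
 * in_fence/out_fence parameters above carry sync_file file descriptors.
 * Passing -1 for in_fence means "no wait"; passing a non-NULL out_fence
 * asks the kernel for a fence fd that signals when the batch completes
 * (taken from the upper 32 bits of execbuf.rsvd2 in do_exec2()).
 *
 *	int fence = -1;
 *	ret = drm_intel_gem_bo_fence_exec(batch_bo, ctx, used,
 *					  -1, &fence, I915_EXEC_RENDER);
 *	if (ret == 0 && fence != -1) {
 *		// wait on or pass along the sync_file fd, then close it
 *		close(fence);
 *	}
 */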
2503
2504static int
Eric Anholt4b982642008-10-30 09:33:07 -07002505drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
Keith Packard8e41ce12008-08-04 00:34:08 -07002506{
Eric Anholtd70d6052009-10-06 12:40:42 -07002507 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2508 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2509 struct drm_i915_gem_pin pin;
2510 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002511
Daniel Vettereb7a5b62015-02-11 11:59:52 +01002512 memclear(pin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002513 pin.handle = bo_gem->gem_handle;
2514 pin.alignment = alignment;
Keith Packard8e41ce12008-08-04 00:34:08 -07002515
Chris Wilson62997222010-09-25 21:32:59 +01002516 ret = drmIoctl(bufmgr_gem->fd,
2517 DRM_IOCTL_I915_GEM_PIN,
2518 &pin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002519 if (ret != 0)
2520 return -errno;
Keith Packard8e41ce12008-08-04 00:34:08 -07002521
Kenneth Graunkeedf17db2014-01-13 14:14:36 -08002522 bo->offset64 = pin.offset;
Eric Anholtd70d6052009-10-06 12:40:42 -07002523 bo->offset = pin.offset;
2524 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07002525}
2526
2527static int
Eric Anholt4b982642008-10-30 09:33:07 -07002528drm_intel_gem_bo_unpin(drm_intel_bo *bo)
Keith Packard8e41ce12008-08-04 00:34:08 -07002529{
Eric Anholtd70d6052009-10-06 12:40:42 -07002530 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2531 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2532 struct drm_i915_gem_unpin unpin;
2533 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002534
Daniel Vettereb7a5b62015-02-11 11:59:52 +01002535 memclear(unpin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002536 unpin.handle = bo_gem->gem_handle;
Keith Packard8e41ce12008-08-04 00:34:08 -07002537
Chris Wilson62997222010-09-25 21:32:59 +01002538 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
Eric Anholtd70d6052009-10-06 12:40:42 -07002539 if (ret != 0)
2540 return -errno;
Keith Packard8e41ce12008-08-04 00:34:08 -07002541
Eric Anholtd70d6052009-10-06 12:40:42 -07002542 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07002543}
2544
2545static int
Chris Wilson1db22ff2010-06-21 14:27:23 +01002546drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2547 uint32_t tiling_mode,
2548 uint32_t stride)
Keith Packard8e41ce12008-08-04 00:34:08 -07002549{
Eric Anholtd70d6052009-10-06 12:40:42 -07002550 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2551 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2552 struct drm_i915_gem_set_tiling set_tiling;
2553 int ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002554
Chris Wilsonaba35022010-06-22 13:00:22 +01002555 if (bo_gem->global_name == 0 &&
2556 tiling_mode == bo_gem->tiling_mode &&
Chris Wilson056aa9b2010-06-21 14:31:29 +01002557 stride == bo_gem->stride)
Eric Anholtd70d6052009-10-06 12:40:42 -07002558 return 0;
Keith Packard18f091d2008-12-15 15:08:12 -08002559
Eric Anholtd70d6052009-10-06 12:40:42 -07002560 memset(&set_tiling, 0, sizeof(set_tiling));
Chris Wilson8ffd2e12009-12-01 13:08:04 +00002561 do {
Chris Wilson62997222010-09-25 21:32:59 +01002562 /* set_tiling is slightly broken and overwrites the
2563 * input on the error path, so we have to open code
2564		 * drmIoctl.
2565 */
Chris Wilson1db22ff2010-06-21 14:27:23 +01002566 set_tiling.handle = bo_gem->gem_handle;
2567 set_tiling.tiling_mode = tiling_mode;
Chris Wilson4f0f8712010-02-10 09:45:13 +00002568 set_tiling.stride = stride;
2569
Chris Wilson8ffd2e12009-12-01 13:08:04 +00002570 ret = ioctl(bufmgr_gem->fd,
2571 DRM_IOCTL_I915_GEM_SET_TILING,
2572 &set_tiling);
Chris Wilson62997222010-09-25 21:32:59 +01002573 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
Chris Wilson1db22ff2010-06-21 14:27:23 +01002574 if (ret == -1)
2575 return -errno;
2576
2577 bo_gem->tiling_mode = set_tiling.tiling_mode;
2578 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
Chris Wilsonaba35022010-06-22 13:00:22 +01002579 bo_gem->stride = set_tiling.stride;
Chris Wilson1db22ff2010-06-21 14:27:23 +01002580 return 0;
2581}
2582
2583static int
2584drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2585 uint32_t stride)
2586{
2587 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2588 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2589 int ret;
2590
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01002591 /* Tiling with userptr surfaces is not supported
2592	 * on all hardware so refuse it for the time being.
2593 */
2594 if (bo_gem->is_userptr)
2595 return -EINVAL;
2596
Chris Wilsoncd34cbe2010-06-22 11:07:26 +01002597 /* Linear buffers have no stride. By ensuring that we only ever use
2598 * stride 0 with linear buffers, we simplify our code.
2599 */
Chris Wilsonc7bbaca2010-06-22 11:15:56 +01002600 if (*tiling_mode == I915_TILING_NONE)
Chris Wilsoncd34cbe2010-06-22 11:07:26 +01002601 stride = 0;
2602
Chris Wilson1db22ff2010-06-21 14:27:23 +01002603 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2604 if (ret == 0)
Anuj Phogat5c68f9f2015-04-10 17:20:55 -07002605 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
Chris Wilsone22fb792009-11-30 22:14:30 +00002606
Keith Packard18f091d2008-12-15 15:08:12 -08002607 *tiling_mode = bo_gem->tiling_mode;
Chris Wilsonfcf3e612010-05-24 18:35:41 +01002608 return ret;
Keith Packard8e41ce12008-08-04 00:34:08 -07002609}
2610
2611static int
Eric Anholtd70d6052009-10-06 12:40:42 -07002612drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2613 uint32_t * swizzle_mode)
Keith Packard8e41ce12008-08-04 00:34:08 -07002614{
Eric Anholtd70d6052009-10-06 12:40:42 -07002615 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Eric Anholt99338382008-10-14 13:18:11 -07002616
Eric Anholtd70d6052009-10-06 12:40:42 -07002617 *tiling_mode = bo_gem->tiling_mode;
2618 *swizzle_mode = bo_gem->swizzle_mode;
2619 return 0;
Eric Anholt99338382008-10-14 13:18:11 -07002620}
2621
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002622static int
2623drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2624{
2625 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2626
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002627 bo->offset64 = offset;
2628 bo->offset = offset;
Chris Wilsone0f05b22017-01-28 16:32:23 +00002629 bo_gem->kflags |= EXEC_OBJECT_PINNED;
2630
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02002631 return 0;
2632}
2633
Emil Velikov0f8da822015-03-31 22:32:11 +01002634drm_intel_bo *
Dave Airlieff65de92012-07-15 00:22:46 +00002635drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2636{
2637 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2638 int ret;
2639 uint32_t handle;
2640 drm_intel_bo_gem *bo_gem;
2641 struct drm_i915_gem_get_tiling get_tiling;
2642
Rafał Sapałacf40cf02015-07-24 11:22:34 +02002643 pthread_mutex_lock(&bufmgr_gem->lock);
Dave Airlieff65de92012-07-15 00:22:46 +00002644 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
Rafał Sapałacf40cf02015-07-24 11:22:34 +02002645 if (ret) {
2646 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2647 pthread_mutex_unlock(&bufmgr_gem->lock);
2648 return NULL;
2649 }
Keith Packardc3d96892013-11-22 05:31:01 -08002650
2651 /*
2652 * See if the kernel has already returned this buffer to us. Just as
2653 * for named buffers, we must not create two bo's pointing at the same
2654 * kernel object
2655 */
Chris Wilsonfe4579e2016-10-24 21:17:13 +01002656 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002657 &handle, sizeof(handle), bo_gem);
2658 if (bo_gem) {
2659 drm_intel_gem_bo_reference(&bo_gem->bo);
2660 goto out;
Keith Packardc3d96892013-11-22 05:31:01 -08002661 }
2662
Dave Airlieff65de92012-07-15 00:22:46 +00002663 bo_gem = calloc(1, sizeof(*bo_gem));
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002664 if (!bo_gem)
2665 goto out;
2666
2667 atomic_set(&bo_gem->refcount, 1);
2668 DRMINITLISTHEAD(&bo_gem->vma_list);
2669
Kristian Høgsberg9c52c3d2013-10-10 14:40:58 -07002670 /* Determine size of bo. The fd-to-handle ioctl really should
2671 * return the size, but it doesn't. If we have kernel 3.12 or
2672 * later, we can lseek on the prime fd to get the size. Older
2673 * kernels will just fail, in which case we fall back to the
2674	 * provided (estimated or guessed) size. */
2675 ret = lseek(prime_fd, 0, SEEK_END);
2676 if (ret != -1)
2677 bo_gem->bo.size = ret;
2678 else
2679 bo_gem->bo.size = size;
2680
Dave Airlieff65de92012-07-15 00:22:46 +00002681 bo_gem->bo.handle = handle;
2682 bo_gem->bo.bufmgr = bufmgr;
2683
2684 bo_gem->gem_handle = handle;
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002685 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2686 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
Dave Airlieff65de92012-07-15 00:22:46 +00002687
2688 bo_gem->name = "prime";
2689 bo_gem->validate_index = -1;
2690 bo_gem->reloc_tree_fences = 0;
2691 bo_gem->used_as_reloc_target = false;
2692 bo_gem->has_error = false;
2693 bo_gem->reusable = false;
2694
Daniel Vettereb7a5b62015-02-11 11:59:52 +01002695 memclear(get_tiling);
Dave Airlieff65de92012-07-15 00:22:46 +00002696 get_tiling.handle = bo_gem->gem_handle;
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002697 if (drmIoctl(bufmgr_gem->fd,
2698 DRM_IOCTL_I915_GEM_GET_TILING,
2699 &get_tiling))
2700 goto err;
2701
Dave Airlieff65de92012-07-15 00:22:46 +00002702 bo_gem->tiling_mode = get_tiling.tiling_mode;
2703 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2704 /* XXX stride is unknown */
Anuj Phogat5c68f9f2015-04-10 17:20:55 -07002705 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
Dave Airlieff65de92012-07-15 00:22:46 +00002706
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002707out:
2708 pthread_mutex_unlock(&bufmgr_gem->lock);
Dave Airlieff65de92012-07-15 00:22:46 +00002709 return &bo_gem->bo;
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002710
2711err:
2712 drm_intel_gem_bo_free(&bo_gem->bo);
2713 pthread_mutex_unlock(&bufmgr_gem->lock);
2714 return NULL;
Dave Airlieff65de92012-07-15 00:22:46 +00002715}
2716
Emil Velikov0f8da822015-03-31 22:32:11 +01002717int
Dave Airlieff65de92012-07-15 00:22:46 +00002718drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2719{
2720 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2721 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2722
Kristian Høgsberg1b7ce582012-09-14 16:35:19 -04002723 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2724 DRM_CLOEXEC, prime_fd) != 0)
2725 return -errno;
2726
2727 bo_gem->reusable = false;
2728
2729 return 0;
Dave Airlieff65de92012-07-15 00:22:46 +00002730}
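/*
 * Example (hypothetical, illustrating the two helpers above): sharing a
 * buffer between two drm_intel_bufmgr instances via a dma-buf fd.  The
 * size passed to create_from_prime is only a fallback estimate; newer
 * kernels let the import path discover the real size with lseek().
 *
 *	int fd = -1;
 *	if (drm_intel_bo_gem_export_to_prime(src_bo, &fd) == 0) {
 *		drm_intel_bo *dst_bo =
 *			drm_intel_bo_gem_create_from_prime(dst_bufmgr, fd,
 *							   src_bo->size);
 *		close(fd);
 *		// ... use dst_bo, then drm_intel_bo_unreference(dst_bo)
 *	}
 */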
2731
Eric Anholt99338382008-10-14 13:18:11 -07002732static int
Eric Anholtd70d6052009-10-06 12:40:42 -07002733drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
Keith Packard8e41ce12008-08-04 00:34:08 -07002734{
Eric Anholtd70d6052009-10-06 12:40:42 -07002735 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2736 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Keith Packard8e41ce12008-08-04 00:34:08 -07002737
Eric Anholtd70d6052009-10-06 12:40:42 -07002738 if (!bo_gem->global_name) {
Chris Wilson90b23cc2012-02-09 10:23:10 +00002739 struct drm_gem_flink flink;
2740
Daniel Vettereb7a5b62015-02-11 11:59:52 +01002741 memclear(flink);
Eric Anholtd70d6052009-10-06 12:40:42 -07002742 flink.handle = bo_gem->gem_handle;
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002743 if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2744 return -errno;
Eric Anholtd70d6052009-10-06 12:40:42 -07002745
Rafal Sapala0fa1dbf2014-08-05 14:51:38 -04002746 pthread_mutex_lock(&bufmgr_gem->lock);
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002747 if (!bo_gem->global_name) {
Dongwon Kim44f220a2017-01-13 16:07:00 -08002748 bo_gem->global_name = flink.name;
2749 bo_gem->reusable = false;
2750
Chris Wilson9e24d0c2016-09-22 14:44:50 +01002751 HASH_ADD(name_hh, bufmgr_gem->name_table,
2752 global_name, sizeof(bo_gem->global_name),
2753 bo_gem);
Rafal Sapala0fa1dbf2014-08-05 14:51:38 -04002754 }
Rafal Sapala0fa1dbf2014-08-05 14:51:38 -04002755 pthread_mutex_unlock(&bufmgr_gem->lock);
Eric Anholtd70d6052009-10-06 12:40:42 -07002756 }
2757
2758 *name = bo_gem->global_name;
2759 return 0;
Keith Packard8e41ce12008-08-04 00:34:08 -07002760}
2761
Eric Anholt6a9eb082008-06-03 09:27:37 -07002762/**
2763 * Enables unlimited caching of buffer objects for reuse.
2764 *
2765 * This is potentially very memory expensive, as the cache at each bucket
2766 * size is only bounded by how many buffers of that size we've managed to have
2767 * in flight at once.
2768 */
Emil Velikov0f8da822015-03-31 22:32:11 +01002769void
Eric Anholt4b982642008-10-30 09:33:07 -07002770drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002771{
Eric Anholtd70d6052009-10-06 12:40:42 -07002772 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002773
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002774 bufmgr_gem->bo_reuse = true;
Eric Anholt6a9eb082008-06-03 09:27:37 -07002775}
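/*
 * Typical usage (hypothetical application code): reuse is opt-in and is
 * normally enabled right after creating the buffer manager.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */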
2776
Eric Anholt0e867312008-10-21 00:10:54 -07002777/**
Chris Wilson1bd35da2016-08-20 18:36:42 +01002778 * Disables implicit synchronisation before executing the bo
2779 *
2780 * This will cause rendering corruption unless you correctly manage explicit
2781 * fences for all rendering involving this buffer - including use by others.
2782 * Disabling the implicit serialisation is only required if that serialisation
2783 * is too coarse (for example, you have split the buffer into many
2784 * non-overlapping regions and are sharing the whole buffer between concurrent
2785 * independent command streams).
2786 *
2787 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
2788 * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync,
2789 * or subsequent execbufs involving the bo will generate EINVAL.
2790 */
2791void
2792drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
2793{
2794 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2795
2796 bo_gem->kflags |= EXEC_OBJECT_ASYNC;
2797}
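/*
 * Usage sketch (hypothetical): explicit fencing is only safe when the
 * kernel advertises I915_PARAM_HAS_EXEC_ASYNC, so check first and then
 * manage ordering yourself, e.g. via the fence fds returned by
 * drm_intel_gem_bo_fence_exec().
 *
 *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_intel_gem_bo_disable_implicit_sync(shared_bo);
 */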
2798
2799/**
Chris Wilsondfd536c2017-01-27 20:25:04 +00002800 * Enables implicit synchronisation before executing the bo
2801 *
2802 * This is the default behaviour of the kernel, to wait upon prior writes
2803 * completing on the object before rendering with it, or to wait for prior
2804 * reads to complete before writing into the object.
2805 * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
2806 * the kernel never to insert a stall before using the object. Then this
2807 * function can be used to restore the implicit sync before subsequent
2808 * rendering.
2809 */
2810void
2811drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
2812{
2813 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2814
2815 bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
2816}
2817
2818/**
Chris Wilson1bd35da2016-08-20 18:36:42 +01002819 * Query whether the kernel supports disabling of its implicit synchronisation
2820 * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
2821 */
2822int
2823drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
2824{
2825 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2826
2827 return bufmgr_gem->has_exec_async;
2828}
2829
2830/**
Jesse Barnesb5096402009-09-15 11:02:58 -07002831 * Enable use of fenced reloc type.
2832 *
2833 * New code should enable this to avoid unnecessary fence register
2834 * allocation. If this option is not enabled, all relocs will have a fence
2835 * register allocated.
2836 */
Emil Velikov0f8da822015-03-31 22:32:11 +01002837void
Jesse Barnesb5096402009-09-15 11:02:58 -07002838drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2839{
Eric Anholt766fa792010-03-02 16:04:14 -08002840 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
Jesse Barnesb5096402009-09-15 11:02:58 -07002841
Eric Anholt766fa792010-03-02 16:04:14 -08002842 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002843 bufmgr_gem->fenced_relocs = true;
Jesse Barnesb5096402009-09-15 11:02:58 -07002844}
2845
2846/**
Eric Anholt0e867312008-10-21 00:10:54 -07002847 * Return the additional aperture space required by the tree of buffer objects
2848 * rooted at bo.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002849 */
2850static int
Eric Anholt4b982642008-10-30 09:33:07 -07002851drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002852{
Eric Anholtd70d6052009-10-06 12:40:42 -07002853 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2854 int i;
2855 int total = 0;
Eric Anholt0e867312008-10-21 00:10:54 -07002856
Eric Anholtd70d6052009-10-06 12:40:42 -07002857 if (bo == NULL || bo_gem->included_in_check_aperture)
2858 return 0;
Eric Anholt0e867312008-10-21 00:10:54 -07002859
Eric Anholtd70d6052009-10-06 12:40:42 -07002860 total += bo->size;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002861 bo_gem->included_in_check_aperture = true;
Eric Anholt0e867312008-10-21 00:10:54 -07002862
Eric Anholtd70d6052009-10-06 12:40:42 -07002863 for (i = 0; i < bo_gem->reloc_count; i++)
2864 total +=
2865 drm_intel_gem_bo_get_aperture_space(bo_gem->
Jesse Barnesb5096402009-09-15 11:02:58 -07002866 reloc_target_info[i].bo);
Eric Anholt0e867312008-10-21 00:10:54 -07002867
Eric Anholtd70d6052009-10-06 12:40:42 -07002868 return total;
Eric Anholt0e867312008-10-21 00:10:54 -07002869}
2870
2871/**
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002872 * Count the number of buffers in this list that need a fence reg
2873 *
2874 * If the count is greater than the number of available regs, we'll have
2875 * to ask the caller to resubmit a batch with fewer tiled buffers.
2876 *
Eric Anholt9209c9a2009-01-27 16:54:11 -08002877 * This function over-counts if the same buffer is used multiple times.
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002878 */
2879static unsigned int
Eric Anholtd70d6052009-10-06 12:40:42 -07002880drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002881{
Eric Anholtd70d6052009-10-06 12:40:42 -07002882 int i;
2883 unsigned int total = 0;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002884
Eric Anholtd70d6052009-10-06 12:40:42 -07002885 for (i = 0; i < count; i++) {
2886 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002887
Eric Anholtd70d6052009-10-06 12:40:42 -07002888 if (bo_gem == NULL)
2889 continue;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002890
Eric Anholtd70d6052009-10-06 12:40:42 -07002891 total += bo_gem->reloc_tree_fences;
2892 }
2893 return total;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002894}
2895
2896/**
Eric Anholt4b982642008-10-30 09:33:07 -07002897 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2898 * for the next drm_intel_bufmgr_check_aperture_space() call.
Eric Anholt0e867312008-10-21 00:10:54 -07002899 */
2900static void
Eric Anholt4b982642008-10-30 09:33:07 -07002901drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
Eric Anholt0e867312008-10-21 00:10:54 -07002902{
Eric Anholtd70d6052009-10-06 12:40:42 -07002903 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2904 int i;
Eric Anholt0e867312008-10-21 00:10:54 -07002905
Eric Anholtd70d6052009-10-06 12:40:42 -07002906 if (bo == NULL || !bo_gem->included_in_check_aperture)
2907 return;
Eric Anholt0e867312008-10-21 00:10:54 -07002908
Eric Anholt2c2bdb32011-10-21 16:53:16 -07002909 bo_gem->included_in_check_aperture = false;
Eric Anholt0e867312008-10-21 00:10:54 -07002910
Eric Anholtd70d6052009-10-06 12:40:42 -07002911 for (i = 0; i < bo_gem->reloc_count; i++)
2912 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
Jesse Barnesb5096402009-09-15 11:02:58 -07002913 reloc_target_info[i].bo);
Eric Anholt0e867312008-10-21 00:10:54 -07002914}
2915
2916/**
Keith Packardb13f4e12008-11-21 01:49:39 -08002917 * Return a conservative estimate for the amount of aperture required
2918 * for a collection of buffers. This may double-count some buffers.
2919 */
2920static unsigned int
2921drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2922{
Eric Anholtd70d6052009-10-06 12:40:42 -07002923 int i;
2924 unsigned int total = 0;
Keith Packardb13f4e12008-11-21 01:49:39 -08002925
Eric Anholtd70d6052009-10-06 12:40:42 -07002926 for (i = 0; i < count; i++) {
2927 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2928 if (bo_gem != NULL)
2929 total += bo_gem->reloc_tree_size;
2930 }
2931 return total;
Keith Packardb13f4e12008-11-21 01:49:39 -08002932}
2933
2934/**
2935 * Return the amount of aperture needed for a collection of buffers.
2936 * This avoids double counting any buffers, at the cost of looking
2937 * at every buffer in the set.
2938 */
2939static unsigned int
2940drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2941{
Eric Anholtd70d6052009-10-06 12:40:42 -07002942 int i;
2943 unsigned int total = 0;
Keith Packardb13f4e12008-11-21 01:49:39 -08002944
Eric Anholtd70d6052009-10-06 12:40:42 -07002945 for (i = 0; i < count; i++) {
2946 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2947 /* For the first buffer object in the array, we get an
2948 * accurate count back for its reloc_tree size (since nothing
2949 * had been flagged as being counted yet). We can save that
2950 * value out as a more conservative reloc_tree_size that
2951 * avoids double-counting target buffers. Since the first
2952 * buffer happens to usually be the batch buffer in our
2953 * callers, this can pull us back from doing the tree
2954 * walk on every new batch emit.
2955 */
2956 if (i == 0) {
2957 drm_intel_bo_gem *bo_gem =
2958 (drm_intel_bo_gem *) bo_array[i];
2959 bo_gem->reloc_tree_size = total;
2960 }
Eric Anholt7ce8d4c2009-02-27 13:46:31 -08002961 }
Keith Packardb13f4e12008-11-21 01:49:39 -08002962
Eric Anholtd70d6052009-10-06 12:40:42 -07002963 for (i = 0; i < count; i++)
2964 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2965 return total;
Keith Packardb13f4e12008-11-21 01:49:39 -08002966}
2967
2968/**
Eric Anholt0e867312008-10-21 00:10:54 -07002969 * Return -1 if the batchbuffer should be flushed before attempting to
2970 * emit rendering referencing the buffers pointed to by bo_array.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002971 *
Eric Anholt0e867312008-10-21 00:10:54 -07002972 * This is required because if we try to emit a batchbuffer with relocations
2973 * to a tree of buffers that won't simultaneously fit in the aperture,
2974 * the rendering will return an error at a point where the software is not
2975 * prepared to recover from it.
2976 *
2977 * However, we also want to emit the batchbuffer significantly before we reach
2978 * the limit, as a series of batchbuffers each of which references buffers
2979 * covering almost all of the aperture means that at each emit we end up
2980 * waiting to evict a buffer from the last rendering, and we get synchronous
2981 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2982 * get better parallelism.
Eric Anholt6a9eb082008-06-03 09:27:37 -07002983 */
2984static int
Eric Anholt4b982642008-10-30 09:33:07 -07002985drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
Eric Anholt6a9eb082008-06-03 09:27:37 -07002986{
Eric Anholtd70d6052009-10-06 12:40:42 -07002987 drm_intel_bufmgr_gem *bufmgr_gem =
2988 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2989 unsigned int total = 0;
2990 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2991 int total_fences;
Jesse Barnes2fa5f282009-01-23 14:13:45 -08002992
Eric Anholtd70d6052009-10-06 12:40:42 -07002993 /* Check for fence reg constraints if necessary */
2994 if (bufmgr_gem->available_fences) {
2995 total_fences = drm_intel_gem_total_fences(bo_array, count);
2996 if (total_fences > bufmgr_gem->available_fences)
Chris Wilsonacb4aa62009-12-02 12:40:26 +00002997 return -ENOSPC;
Eric Anholtd70d6052009-10-06 12:40:42 -07002998 }
Eric Anholt0e867312008-10-21 00:10:54 -07002999
Eric Anholtd70d6052009-10-06 12:40:42 -07003000 total = drm_intel_gem_estimate_batch_space(bo_array, count);
Jesse Barnes2fa5f282009-01-23 14:13:45 -08003001
Eric Anholtd70d6052009-10-06 12:40:42 -07003002 if (total > threshold)
3003 total = drm_intel_gem_compute_batch_space(bo_array, count);
Eric Anholt0e867312008-10-21 00:10:54 -07003004
Eric Anholtd70d6052009-10-06 12:40:42 -07003005 if (total > threshold) {
3006 DBG("check_space: overflowed available aperture, "
3007 "%dkb vs %dkb\n",
3008 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
Chris Wilsonacb4aa62009-12-02 12:40:26 +00003009 return -ENOSPC;
Eric Anholtd70d6052009-10-06 12:40:42 -07003010 } else {
3011 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
3012 (int)bufmgr_gem->gtt_size / 1024);
3013 return 0;
3014 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07003015}
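/*
 * Callers typically use this as a flush heuristic (hypothetical sketch):
 * before adding another draw call's buffers to the batch, check whether
 * everything still fits; on failure, submit the batch built so far and
 * retry with a fresh one.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0)
 *		flush_batch(); // hypothetical helper in the caller
 */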
3016
Keith Packard5b5ce302009-05-11 13:42:12 -07003017/*
3018 * Disable buffer reuse for objects which are shared with the kernel
3019 * as scanout buffers
3020 */
3021static int
3022drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
3023{
Eric Anholtd70d6052009-10-06 12:40:42 -07003024 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
Keith Packard5b5ce302009-05-11 13:42:12 -07003025
Eric Anholt2c2bdb32011-10-21 16:53:16 -07003026 bo_gem->reusable = false;
Eric Anholtd70d6052009-10-06 12:40:42 -07003027 return 0;
Keith Packard5b5ce302009-05-11 13:42:12 -07003028}
3029
Eric Anholt769b1052009-10-01 19:09:26 -07003030static int
Chris Wilson07e75892010-05-11 08:54:06 +01003031drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
3032{
3033 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3034
3035 return bo_gem->reusable;
3036}
3037
3038static int
Eric Anholt66d27142009-10-20 13:20:55 -07003039_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
Eric Anholt769b1052009-10-01 19:09:26 -07003040{
Eric Anholtd70d6052009-10-06 12:40:42 -07003041 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3042 int i;
Eric Anholt769b1052009-10-01 19:09:26 -07003043
Eric Anholtd70d6052009-10-06 12:40:42 -07003044 for (i = 0; i < bo_gem->reloc_count; i++) {
Jesse Barnesb5096402009-09-15 11:02:58 -07003045 if (bo_gem->reloc_target_info[i].bo == target_bo)
Eric Anholtd70d6052009-10-06 12:40:42 -07003046 return 1;
Eric Anholt4f7704a2010-06-10 08:58:08 -07003047 if (bo == bo_gem->reloc_target_info[i].bo)
3048 continue;
Jesse Barnesb5096402009-09-15 11:02:58 -07003049 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
Eric Anholtd70d6052009-10-06 12:40:42 -07003050 target_bo))
3051 return 1;
3052 }
3053
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02003054	for (i = 0; i < bo_gem->softpin_target_count; i++) {
3055 if (bo_gem->softpin_target[i] == target_bo)
3056 return 1;
3057 if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
3058 return 1;
3059 }
3060
Eric Anholt769b1052009-10-01 19:09:26 -07003061 return 0;
Eric Anholt769b1052009-10-01 19:09:26 -07003062}
3063
Eric Anholt66d27142009-10-20 13:20:55 -07003064/** Return true if target_bo is referenced by bo's relocation tree. */
3065static int
3066drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3067{
3068 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3069
3070 if (bo == NULL || target_bo == NULL)
3071 return 0;
3072 if (target_bo_gem->used_as_reloc_target)
3073 return _drm_intel_gem_bo_references(bo, target_bo);
3074 return 0;
3075}
3076
Eric Anholt0ec768e2010-06-04 17:09:11 -07003077static void
3078add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3079{
3080 unsigned int i = bufmgr_gem->num_buckets;
3081
3082 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3083
3084 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3085 bufmgr_gem->cache_bucket[i].size = size;
3086 bufmgr_gem->num_buckets++;
3087}
3088
3089static void
3090init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3091{
3092 unsigned long size, cache_max_size = 64 * 1024 * 1024;
3093
3094 /* OK, so power of two buckets was too wasteful of memory.
3095 * Give 3 other sizes between each power of two, to hopefully
3096 * cover things accurately enough. (The alternative is
3097 * probably to just go for exact matching of sizes, and assume
3098 * that for things like composited window resize the tiled
3099 * width/height alignment and rounding of sizes to pages will
3100 * get us useful cache hit rates anyway)
3101 */
3102 add_bucket(bufmgr_gem, 4096);
3103 add_bucket(bufmgr_gem, 4096 * 2);
3104 add_bucket(bufmgr_gem, 4096 * 3);
3105
3106 /* Initialize the linked lists for BO reuse cache. */
3107 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3108 add_bucket(bufmgr_gem, size);
3109
3110 add_bucket(bufmgr_gem, size + size * 1 / 4);
3111 add_bucket(bufmgr_gem, size + size * 2 / 4);
3112 add_bucket(bufmgr_gem, size + size * 3 / 4);
3113 }
3114}
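/*
 * With the cache_max_size of 64MB above, the buckets work out to
 * 4KB, 8KB, 12KB, then 16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB, 56KB,
 * 64KB, ... i.e. each power of two plus three evenly spaced sizes up to
 * the next one.
 */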
3115
Emil Velikov0f8da822015-03-31 22:32:11 +01003116void
Chris Wilsone4b60f22011-12-05 21:29:05 +00003117drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3118{
3119 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3120
3121 bufmgr_gem->vma_max = limit;
3122
3123 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3124}
3125
Neil Roberts319108f2015-11-09 16:27:52 +01003126static int
3127parse_devid_override(const char *devid_override)
3128{
3129 static const struct {
3130 const char *name;
3131 int pci_id;
3132 } name_map[] = {
3133 { "brw", PCI_CHIP_I965_GM },
3134 { "g4x", PCI_CHIP_GM45_GM },
3135 { "ilk", PCI_CHIP_ILD_G },
3136 { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
3137 { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
3138 { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
3139 { "byt", PCI_CHIP_VALLEYVIEW_3 },
3140 { "bdw", 0x1620 | BDW_ULX },
3141 { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
3142 { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
3143 };
3144 unsigned int i;
3145
3146 for (i = 0; i < ARRAY_SIZE(name_map); i++) {
3147 if (!strcmp(name_map[i].name, devid_override))
3148 return name_map[i].pci_id;
3149 }
3150
3151 return strtod(devid_override, NULL);
3152}
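/*
 * Example (hypothetical): INTEL_DEVID_OVERRIDE accepts either one of the
 * short names in the table above or a numeric PCI ID, e.g.
 *
 *	INTEL_DEVID_OVERRIDE=skl ./client
 *	INTEL_DEVID_OVERRIDE=0x0126 ./client
 *
 * Note that using the override also sets no_exec (see below), so on the
 * execbuf2 path batches are not actually submitted to the kernel.
 */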
3153
Eric Anholt769b1052009-10-01 19:09:26 -07003154/**
Kenneth Graunke6e642db2011-10-11 14:38:34 -07003155 * Get the PCI ID for the device. This can be overridden by setting the
3156 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
3157 */
3158static int
3159get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3160{
3161 char *devid_override;
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003162 int devid = 0;
Kenneth Graunke6e642db2011-10-11 14:38:34 -07003163 int ret;
3164 drm_i915_getparam_t gp;
3165
3166 if (geteuid() == getuid()) {
3167 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3168 if (devid_override) {
3169 bufmgr_gem->no_exec = true;
Neil Roberts319108f2015-11-09 16:27:52 +01003170 return parse_devid_override(devid_override);
Kenneth Graunke6e642db2011-10-11 14:38:34 -07003171 }
3172 }
3173
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003174 memclear(gp);
Kenneth Graunke6e642db2011-10-11 14:38:34 -07003175 gp.param = I915_PARAM_CHIPSET_ID;
3176 gp.value = &devid;
3177 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3178 if (ret) {
3179 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3180 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
3181 }
3182 return devid;
3183}
3184
Emil Velikov0f8da822015-03-31 22:32:11 +01003185int
Kenneth Graunke6e642db2011-10-11 14:38:34 -07003186drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3187{
3188 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3189
3190 return bufmgr_gem->pci_device;
3191}
3192
3193/**
Damien Lespiaufbd106a2013-02-20 12:11:49 +00003194 * Sets the AUB filename.
3195 *
3196 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3197 * for it to have any effect.
3198 */
Emil Velikov0f8da822015-03-31 22:32:11 +01003199void
Damien Lespiaufbd106a2013-02-20 12:11:49 +00003200drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3201 const char *filename)
3202{
Damien Lespiaufbd106a2013-02-20 12:11:49 +00003203}
3204
3205/**
Eric Anholt4db16a92011-10-11 15:59:03 -07003206 * Sets up AUB dumping.
3207 *
3208 * This is a trace file format that can be used with the simulator.
3209 * Packets are emitted in a format somewhat like GPU command packets.
3210 * You can set up a GTT and upload your objects into the referenced
3211 * space, then send off batchbuffers and get BMPs out the other end.
3212 */
Emil Velikov0f8da822015-03-31 22:32:11 +01003213void
Eric Anholt4db16a92011-10-11 15:59:03 -07003214drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3215{
Kristian Høgsberg Kristensencd2f91e2015-07-31 10:47:50 -07003216 fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
3217 "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
3218 "then run (for example)\n\n"
3219 "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
3220 "See the intel_aubdump man page for more details.\n");
Eric Anholt4db16a92011-10-11 15:59:03 -07003221}
3222
Emil Velikov0f8da822015-03-31 22:32:11 +01003223drm_intel_context *
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003224drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3225{
3226 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3227 struct drm_i915_gem_context_create create;
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003228 drm_intel_context *context = NULL;
Damien Lespiauc10b08d2012-07-26 17:50:09 +01003229 int ret;
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003230
Ben Widawsky3d34fe22013-12-26 16:37:00 -08003231 context = calloc(1, sizeof(*context));
3232 if (!context)
3233 return NULL;
3234
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003235 memclear(create);
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003236 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3237 if (ret != 0) {
Kenneth Graunke992e2af2012-07-12 13:41:11 -07003238 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3239 strerror(errno));
Ben Widawsky3d34fe22013-12-26 16:37:00 -08003240 free(context);
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003241 return NULL;
3242 }
3243
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003244 context->ctx_id = create.ctx_id;
3245 context->bufmgr = bufmgr;
3246
3247 return context;
3248}
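/*
 * Usage sketch (hypothetical): hardware contexts pair with
 * drm_intel_gem_bo_context_exec() and must be destroyed explicitly.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */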
3249
Robert Bragg770f6bc2015-01-26 16:11:26 +00003250int
3251drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
3252{
3253 if (ctx == NULL)
3254 return -EINVAL;
3255
3256 *ctx_id = ctx->ctx_id;
3257
3258 return 0;
3259}
3260
Emil Velikov0f8da822015-03-31 22:32:11 +01003261void
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003262drm_intel_gem_context_destroy(drm_intel_context *ctx)
3263{
3264 drm_intel_bufmgr_gem *bufmgr_gem;
3265 struct drm_i915_gem_context_destroy destroy;
3266 int ret;
3267
3268 if (ctx == NULL)
3269 return;
3270
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003271 memclear(destroy);
Kenneth Graunkea9412fa2012-08-12 13:33:05 -07003272
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003273 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3274 destroy.ctx_id = ctx->ctx_id;
3275 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3276 &destroy);
3277 if (ret != 0)
3278 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
3279 strerror(errno));
3280
3281 free(ctx);
3282}
3283
Emil Velikov0f8da822015-03-31 22:32:11 +01003284int
Ian Romanick5a41b022013-11-15 10:24:43 -08003285drm_intel_get_reset_stats(drm_intel_context *ctx,
3286 uint32_t *reset_count,
3287 uint32_t *active,
3288 uint32_t *pending)
3289{
3290 drm_intel_bufmgr_gem *bufmgr_gem;
3291 struct drm_i915_reset_stats stats;
3292 int ret;
3293
3294 if (ctx == NULL)
3295 return -EINVAL;
3296
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003297 memclear(stats);
Ian Romanick5a41b022013-11-15 10:24:43 -08003298
3299 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3300 stats.ctx_id = ctx->ctx_id;
3301 ret = drmIoctl(bufmgr_gem->fd,
3302 DRM_IOCTL_I915_GET_RESET_STATS,
3303 &stats);
3304 if (ret == 0) {
3305 if (reset_count != NULL)
3306 *reset_count = stats.reset_count;
3307
3308 if (active != NULL)
3309 *active = stats.batch_active;
3310
3311 if (pending != NULL)
3312 *pending = stats.batch_pending;
3313 }
3314
3315 return ret;
3316}
3317
Emil Velikov0f8da822015-03-31 22:32:11 +01003318int
Eric Anholt2607dad2012-08-01 16:43:16 -07003319drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3320 uint32_t offset,
3321 uint64_t *result)
3322{
3323 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3324 struct drm_i915_reg_read reg_read;
3325 int ret;
3326
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003327 memclear(reg_read);
Eric Anholt2607dad2012-08-01 16:43:16 -07003328 reg_read.offset = offset;
3329
3330 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
3331
3332 *result = reg_read.val;
3333 return ret;
3334}
3335
Emil Velikov0f8da822015-03-31 22:32:11 +01003336int
Jeff McGeed556e062015-03-09 16:13:03 -07003337drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3338{
3339 drm_i915_getparam_t gp;
3340 int ret;
3341
3342 memclear(gp);
3343 gp.value = (int*)subslice_total;
3344 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3345 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3346 if (ret)
3347 return -errno;
3348
3349 return 0;
3350}
3351
Emil Velikov0f8da822015-03-31 22:32:11 +01003352int
Jeff McGeed556e062015-03-09 16:13:03 -07003353drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3354{
3355 drm_i915_getparam_t gp;
3356 int ret;
3357
3358 memclear(gp);
3359 gp.value = (int*)eu_total;
3360 gp.param = I915_PARAM_EU_TOTAL;
3361 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3362 if (ret)
3363 return -errno;
3364
3365 return 0;
3366}
Ben Widawskyf7210fa2012-01-13 11:31:52 -08003367
Yang Rong98887142016-08-02 15:50:34 +08003368int
3369drm_intel_get_pooled_eu(int fd)
3370{
3371 drm_i915_getparam_t gp;
3372 int ret = -1;
3373
3374 memclear(gp);
3375 gp.param = I915_PARAM_HAS_POOLED_EU;
3376 gp.value = &ret;
3377 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3378 return -errno;
3379
3380 return ret;
3381}
3382
3383int
3384drm_intel_get_min_eu_in_pool(int fd)
3385{
3386 drm_i915_getparam_t gp;
3387 int ret = -1;
3388
3389 memclear(gp);
3390 gp.param = I915_PARAM_MIN_EU_IN_POOL;
3391 gp.value = &ret;
3392 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3393 return -errno;
3394
3395 return ret;
3396}
3397
Eric Anholt4db16a92011-10-11 15:59:03 -07003398/**
Paul Berryda02f722012-05-04 12:41:00 -07003399 * Annotate the given bo for use in aub dumping.
3400 *
3401 * \param annotations is an array of drm_intel_aub_annotation objects
3402 * describing the type of data in various sections of the bo. Each
3403 * element of the array specifies the type and subtype of a section of
3404 * the bo, and the past-the-end offset of that section. The elements
3405 * of \c annotations must be sorted so that ending_offset is
3406 * increasing.
3407 *
3408 * \param count is the number of elements in the \c annotations array.
3409 * If \c count is zero, then \c annotations will not be dereferenced.
3410 *
3411 * Annotations are copied into a private data structure, so caller may
3412 * re-use the memory pointed to by \c annotations after the call
3413 * returns.
3414 *
3415 * Annotations are stored for the lifetime of the bo; to reset to the
3416 * default state (no annotations), call this function with a \c count
3417 * of zero.
3418 */
Emil Velikov0f8da822015-03-31 22:32:11 +01003419void
Paul Berryda02f722012-05-04 12:41:00 -07003420drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3421 drm_intel_aub_annotation *annotations,
3422 unsigned count)
3423{
Paul Berryda02f722012-05-04 12:41:00 -07003424}
3425
Lionel Landwerlin743af592014-09-12 13:48:36 +01003426static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3427static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3428
3429static drm_intel_bufmgr_gem *
3430drm_intel_bufmgr_gem_find(int fd)
3431{
3432 drm_intel_bufmgr_gem *bufmgr_gem;
3433
3434 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3435 if (bufmgr_gem->fd == fd) {
3436 atomic_inc(&bufmgr_gem->refcount);
3437 return bufmgr_gem;
3438 }
3439 }
3440
3441 return NULL;
3442}
3443
3444static void
3445drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3446{
3447 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3448
3449 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3450 pthread_mutex_lock(&bufmgr_list_mutex);
3451
3452 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3453 DRMLISTDEL(&bufmgr_gem->managers);
3454 drm_intel_bufmgr_gem_destroy(bufmgr);
3455 }
3456
3457 pthread_mutex_unlock(&bufmgr_list_mutex);
3458 }
3459}
3460
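/*
 * The map__gtt/map__cpu/map__wc helpers below return a cached mapping of
 * the object without issuing any set-domain call, so the caller is
 * responsible for ordering CPU access against the GPU (observation based
 * on the code below, not a documented guarantee).
 */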
Chris Wilson455e9b42015-05-01 13:39:55 +01003461void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
3462{
3463 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3464 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3465
3466 if (bo_gem->gtt_virtual)
3467 return bo_gem->gtt_virtual;
3468
3469 if (bo_gem->is_userptr)
3470 return NULL;
3471
3472 pthread_mutex_lock(&bufmgr_gem->lock);
3473 if (bo_gem->gtt_virtual == NULL) {
3474 struct drm_i915_gem_mmap_gtt mmap_arg;
3475 void *ptr;
3476
3477 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
3478 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3479
3480 if (bo_gem->map_count++ == 0)
3481 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3482
3483 memclear(mmap_arg);
3484 mmap_arg.handle = bo_gem->gem_handle;
3485
3486 /* Get the fake offset back... */
3487 ptr = MAP_FAILED;
3488 if (drmIoctl(bufmgr_gem->fd,
3489 DRM_IOCTL_I915_GEM_MMAP_GTT,
3490 &mmap_arg) == 0) {
3491 /* and mmap it */
3492 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
3493 MAP_SHARED, bufmgr_gem->fd,
3494 mmap_arg.offset);
3495 }
3496 if (ptr == MAP_FAILED) {
3497 if (--bo_gem->map_count == 0)
3498 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3499 ptr = NULL;
3500 }
3501
3502 bo_gem->gtt_virtual = ptr;
3503 }
3504 pthread_mutex_unlock(&bufmgr_gem->lock);
3505
3506 return bo_gem->gtt_virtual;
3507}
3508
3509void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
3510{
3511 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3512 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3513
3514 if (bo_gem->mem_virtual)
3515 return bo_gem->mem_virtual;
3516
3517 if (bo_gem->is_userptr) {
3518 /* Return the same user ptr */
3519 return bo_gem->user_virtual;
3520 }
3521
3522 pthread_mutex_lock(&bufmgr_gem->lock);
3523 if (!bo_gem->mem_virtual) {
3524 struct drm_i915_gem_mmap mmap_arg;
3525
3526 if (bo_gem->map_count++ == 0)
3527 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3528
3529 DBG("bo_map: %d (%s), map_count=%d\n",
3530 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3531
3532 memclear(mmap_arg);
3533 mmap_arg.handle = bo_gem->gem_handle;
3534 mmap_arg.size = bo->size;
3535 if (drmIoctl(bufmgr_gem->fd,
3536 DRM_IOCTL_I915_GEM_MMAP,
3537 &mmap_arg)) {
3538 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3539 __FILE__, __LINE__, bo_gem->gem_handle,
3540 bo_gem->name, strerror(errno));
3541 if (--bo_gem->map_count == 0)
3542 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3543 } else {
3544 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3545 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3546 }
3547 }
3548 pthread_mutex_unlock(&bufmgr_gem->lock);
3549
3550 return bo_gem->mem_virtual;
3551}
3552
3553void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
3554{
3555 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3556 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3557
3558 if (bo_gem->wc_virtual)
3559 return bo_gem->wc_virtual;
3560
3561 if (bo_gem->is_userptr)
3562 return NULL;
3563
3564 pthread_mutex_lock(&bufmgr_gem->lock);
3565 if (!bo_gem->wc_virtual) {
3566 struct drm_i915_gem_mmap mmap_arg;
3567
3568 if (bo_gem->map_count++ == 0)
3569 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3570
3571 DBG("bo_map: %d (%s), map_count=%d\n",
3572 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3573
3574 memclear(mmap_arg);
3575 mmap_arg.handle = bo_gem->gem_handle;
3576 mmap_arg.size = bo->size;
3577 mmap_arg.flags = I915_MMAP_WC;
3578 if (drmIoctl(bufmgr_gem->fd,
3579 DRM_IOCTL_I915_GEM_MMAP,
3580 &mmap_arg)) {
3581 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3582 __FILE__, __LINE__, bo_gem->gem_handle,
3583 bo_gem->name, strerror(errno));
3584 if (--bo_gem->map_count == 0)
3585 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3586 } else {
3587 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3588 bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3589 }
3590 }
3591 pthread_mutex_unlock(&bufmgr_gem->lock);
3592
3593 return bo_gem->wc_virtual;
3594}
3595
Paul Berryda02f722012-05-04 12:41:00 -07003596/**
Eric Anholt6a9eb082008-06-03 09:27:37 -07003597 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3598 * and manage buffer objects.
3599 *
3600 * \param fd File descriptor of the opened DRM device.
3601 */
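/*
 * Minimal setup sketch (hypothetical client code, error handling and the
 * device path are assumptions):
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 */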
Emil Velikov0f8da822015-03-31 22:32:11 +01003602drm_intel_bufmgr *
Eric Anholt4b982642008-10-30 09:33:07 -07003603drm_intel_bufmgr_gem_init(int fd, int batch_size)
Eric Anholt6a9eb082008-06-03 09:27:37 -07003604{
Eric Anholtd70d6052009-10-06 12:40:42 -07003605 drm_intel_bufmgr_gem *bufmgr_gem;
3606 struct drm_i915_gem_get_aperture aperture;
3607 drm_i915_getparam_t gp;
Daniel Vetter630dd262011-09-22 22:20:09 +02003608 int ret, tmp;
Eric Anholt2c2bdb32011-10-21 16:53:16 -07003609 bool exec2 = false;
Eric Anholt6a9eb082008-06-03 09:27:37 -07003610
Lionel Landwerlin743af592014-09-12 13:48:36 +01003611 pthread_mutex_lock(&bufmgr_list_mutex);
3612
3613 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3614 if (bufmgr_gem)
3615 goto exit;
3616
Eric Anholtd70d6052009-10-06 12:40:42 -07003617 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
Dave Airlie973d8d62010-02-02 10:57:12 +10003618 if (bufmgr_gem == NULL)
Lionel Landwerlin743af592014-09-12 13:48:36 +01003619 goto exit;
Dave Airlie973d8d62010-02-02 10:57:12 +10003620
Eric Anholtd70d6052009-10-06 12:40:42 -07003621 bufmgr_gem->fd = fd;
Lionel Landwerlin743af592014-09-12 13:48:36 +01003622 atomic_set(&bufmgr_gem->refcount, 1);
Eric Anholt6a9eb082008-06-03 09:27:37 -07003623
Eric Anholtd70d6052009-10-06 12:40:42 -07003624 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3625 free(bufmgr_gem);
Lionel Landwerlin743af592014-09-12 13:48:36 +01003626 bufmgr_gem = NULL;
3627 goto exit;
Eric Anholtd70d6052009-10-06 12:40:42 -07003628 }
Eric Anholt6df7b072008-06-12 23:22:26 -07003629
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003630 memclear(aperture);
Chris Wilson62997222010-09-25 21:32:59 +01003631 ret = drmIoctl(bufmgr_gem->fd,
3632 DRM_IOCTL_I915_GEM_GET_APERTURE,
3633 &aperture);
Eric Anholt0e867312008-10-21 00:10:54 -07003634
Eric Anholtd70d6052009-10-06 12:40:42 -07003635 if (ret == 0)
3636 bufmgr_gem->gtt_size = aperture.aper_available_size;
3637 else {
3638 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3639 strerror(errno));
3640 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3641 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3642 "May lead to reduced performance or incorrect "
3643 "rendering.\n",
3644 (int)bufmgr_gem->gtt_size / 1024);
3645 }
Eric Anholt0e867312008-10-21 00:10:54 -07003646
Kenneth Graunke6e642db2011-10-11 14:38:34 -07003647 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
Jesse Barnes2fa5f282009-01-23 14:13:45 -08003648
Eric Anholt078bc5b2011-12-20 13:10:36 -08003649 if (IS_GEN2(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08003650 bufmgr_gem->gen = 2;
Eric Anholt078bc5b2011-12-20 13:10:36 -08003651 else if (IS_GEN3(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08003652 bufmgr_gem->gen = 3;
Eric Anholt078bc5b2011-12-20 13:10:36 -08003653 else if (IS_GEN4(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08003654 bufmgr_gem->gen = 4;
Chad Versace592ac672012-01-27 10:02:16 -08003655 else if (IS_GEN5(bufmgr_gem->pci_device))
3656 bufmgr_gem->gen = 5;
3657 else if (IS_GEN6(bufmgr_gem->pci_device))
Eric Anholta1f9ea72010-03-02 08:49:36 -08003658 bufmgr_gem->gen = 6;
Chad Versace592ac672012-01-27 10:02:16 -08003659 else if (IS_GEN7(bufmgr_gem->pci_device))
Chris Wilson9a2b57d2012-07-25 16:28:59 +01003660 bufmgr_gem->gen = 7;
Ben Widawsky5b348f32013-02-13 16:09:33 +00003661 else if (IS_GEN8(bufmgr_gem->pci_device))
3662 bufmgr_gem->gen = 8;
Damien Lespiauf1e15d12013-02-13 16:09:37 +00003663 else if (IS_GEN9(bufmgr_gem->pci_device))
3664 bufmgr_gem->gen = 9;
Ben Widawsky5c490bd2016-08-24 14:51:43 -07003665 else if (IS_GEN10(bufmgr_gem->pci_device))
3666 bufmgr_gem->gen = 10;
Chris Wilson9a2b57d2012-07-25 16:28:59 +01003667 else {
3668 free(bufmgr_gem);
Lionel Landwerlin743af592014-09-12 13:48:36 +01003669 bufmgr_gem = NULL;
3670 goto exit;
Chris Wilson9a2b57d2012-07-25 16:28:59 +01003671 }
Eric Anholta1f9ea72010-03-02 08:49:36 -08003672
Eric Anholt078bc5b2011-12-20 13:10:36 -08003673 if (IS_GEN3(bufmgr_gem->pci_device) &&
3674 bufmgr_gem->gtt_size > 256*1024*1024) {
Daniel Vetter36cff1c2011-12-04 12:51:45 +01003675 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3676 * be used for tiled blits. To simplify the accounting, just
Eric Engestrom723a6942016-04-03 19:48:09 +01003677 * subtract the unmappable part (fixed to 256MB on all known
Daniel Vetter36cff1c2011-12-04 12:51:45 +01003678 * gen3 devices) if the kernel advertises it. */
3679 bufmgr_gem->gtt_size -= 256*1024*1024;
3680 }
3681
Daniel Vettereb7a5b62015-02-11 11:59:52 +01003682 memclear(gp);
Daniel Vetter630dd262011-09-22 22:20:09 +02003683 gp.value = &tmp;
3684
Jesse Barnesb5096402009-09-15 11:02:58 -07003685 gp.param = I915_PARAM_HAS_EXECBUF2;
Chris Wilson62997222010-09-25 21:32:59 +01003686 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Jesse Barnesb5096402009-09-15 11:02:58 -07003687 if (!ret)
Eric Anholt2c2bdb32011-10-21 16:53:16 -07003688 exec2 = true;
Jesse Barnesb5096402009-09-15 11:02:58 -07003689
Zou Nan hai66375fd2010-06-02 10:07:37 +08003690 gp.param = I915_PARAM_HAS_BSD;
Chris Wilson62997222010-09-25 21:32:59 +01003691 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Chris Wilson057fab32010-10-26 11:35:11 +01003692 bufmgr_gem->has_bsd = ret == 0;
3693
3694 gp.param = I915_PARAM_HAS_BLT;
3695 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3696 bufmgr_gem->has_blt = ret == 0;
Zou Nan hai66375fd2010-06-02 10:07:37 +08003697
Chris Wilson36245772010-10-29 10:49:54 +01003698 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3699 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3700 bufmgr_gem->has_relaxed_fencing = ret == 0;
3701
Chris Wilson1bd35da2016-08-20 18:36:42 +01003702 gp.param = I915_PARAM_HAS_EXEC_ASYNC;
3703 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3704 bufmgr_gem->has_exec_async = ret == 0;
3705
Chris Wilson32258e42014-11-04 14:26:49 +00003706 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
Tvrtko Ursulinae8edc72014-06-19 15:52:03 +01003707
Ben Widawsky971c0802012-06-05 11:30:48 -07003708 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3709 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3710 bufmgr_gem->has_wait_timeout = ret == 0;
3711
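	/* On LLC platforms the CPU and GPU share the last-level cache, so
	 * CPU maps of buffer objects stay coherent without explicit
	 * clflushes. */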
Eugeni Dodonov151cdcf2012-01-17 15:20:19 -02003712 gp.param = I915_PARAM_HAS_LLC;
3713 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Eric Anholt3a888482012-02-27 17:26:05 -08003714 if (ret != 0) {
Eugeni Dodonov151cdcf2012-01-17 15:20:19 -02003715		/* Kernel does not support the HAS_LLC query; fall back to GPU
3716		 * generation detection and assume that we have LLC on GEN6/7.
3717		 */
3718		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
3719				IS_GEN7(bufmgr_gem->pci_device));
3720 } else
Chris Wilson75830a02012-10-07 10:05:19 +01003721 bufmgr_gem->has_llc = *gp.value;
Eugeni Dodonov151cdcf2012-01-17 15:20:19 -02003722
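	/* VEBOX is the video enhancement ring added on Haswell. */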
Xiang, Haihao01199992012-11-14 12:46:39 +08003723 gp.param = I915_PARAM_HAS_VEBOX;
3724 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3725	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3726
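	/* Softpin lets the caller place a buffer at a fixed GPU virtual
	 * address instead of having the kernel relocate it. */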
Michał Winiarski8b4d57e2015-09-09 16:07:10 +02003727 gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3728 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3729 if (ret == 0 && *gp.value > 0)
3730 bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3731
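	/* Pre-gen4 hardware needs fence registers for tiled access, so keep
	 * track of how many the kernel can hand out. */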
Eric Anholta1f9ea72010-03-02 08:49:36 -08003732 if (bufmgr_gem->gen < 4) {
Eric Anholtd70d6052009-10-06 12:40:42 -07003733 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3734 gp.value = &bufmgr_gem->available_fences;
Chris Wilson62997222010-09-25 21:32:59 +01003735 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
Eric Anholtd70d6052009-10-06 12:40:42 -07003736 if (ret) {
3737 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3738 errno);
3739 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3740 *gp.value);
3741 bufmgr_gem->available_fences = 0;
Chris Wilsonfdcde592010-02-09 08:32:54 +00003742 } else {
3743 /* XXX The kernel reports the total number of fences,
3744 * including any that may be pinned.
3745 *
3746 * We presume that there will be at least one pinned
3747 * fence for the scanout buffer, but there may be more
3748 * than one scanout and the user may be manually
3749 * pinning buffers. Let's move to execbuffer2 and
3750 * thereby forget the insanity of using fences...
3751 */
3752 bufmgr_gem->available_fences -= 2;
3753 if (bufmgr_gem->available_fences < 0)
3754 bufmgr_gem->available_fences = 0;
Eric Anholtd70d6052009-10-06 12:40:42 -07003755 }
3756 }
Eric Anholt6a9eb082008-06-03 09:27:37 -07003757
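	/* A HAS_ALIASING_PPGTT value of 3 indicates full 48-bit PPGTT, in
	 * which case buffers may be placed above 4GiB and callers can opt
	 * in to the larger address range. */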
Michel Thierry3350add2015-09-03 15:23:58 +01003758 if (bufmgr_gem->gen >= 8) {
3759 gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3760 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3761 if (ret == 0 && *gp.value == 3)
3762 bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3763 }
3764
Eric Anholtd70d6052009-10-06 12:40:42 -07003765	/* Let's go with one relocation for every 2 dwords of batch (but round
3766	 * down a bit, since a power of two would mean an extra page allocation
3767	 * for the reloc buffer).
3768	 *
3769	 * One per 4 dwords was too few for the blender benchmark.
3770	 */
3771 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
Eric Anholt769b1052009-10-01 19:09:26 -07003772
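	/* Fill in the public drm_intel_bufmgr vtable with the GEM
	 * implementations. */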
Eric Anholtd70d6052009-10-06 12:40:42 -07003773 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3774 bufmgr_gem->bufmgr.bo_alloc_for_render =
3775 drm_intel_gem_bo_alloc_for_render;
Jesse Barnes3a7dfcd2009-10-06 14:34:06 -07003776 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
Eric Anholtd70d6052009-10-06 12:40:42 -07003777 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3778 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3779 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3780 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3781 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3782 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3783 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3784 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
Jesse Barnesb5096402009-09-15 11:02:58 -07003785 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
Eric Anholtd70d6052009-10-06 12:40:42 -07003786 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3787 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3788 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3789 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3790 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
Jesse Barnesb5096402009-09-15 11:02:58 -07003791	/* Use the newer execbuf2 path if the kernel supports it */
Zou Nan hai66375fd2010-06-02 10:07:37 +08003792 if (exec2) {
Jesse Barnesb5096402009-09-15 11:02:58 -07003793 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
Albert Damen49447a92010-11-07 15:54:32 +01003794 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
Zou Nan hai66375fd2010-06-02 10:07:37 +08003795 } else
Jesse Barnesb5096402009-09-15 11:02:58 -07003796 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
Eric Anholtd70d6052009-10-06 12:40:42 -07003797 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
Chris Wilson83a35b62009-11-11 13:04:38 +00003798 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
Lionel Landwerlin743af592014-09-12 13:48:36 +01003799 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
Eric Anholtd70d6052009-10-06 12:40:42 -07003800 bufmgr_gem->bufmgr.debug = 0;
3801 bufmgr_gem->bufmgr.check_aperture_space =
3802 drm_intel_gem_check_aperture_space;
3803 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
Chris Wilson07e75892010-05-11 08:54:06 +01003804 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
Eric Anholtd70d6052009-10-06 12:40:42 -07003805 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3806 drm_intel_gem_get_pipe_from_crtc_id;
3807 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
Eric Anholt6a9eb082008-06-03 09:27:37 -07003808
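	/* Set up the size-bucketed cache used to recycle freed buffers. */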
Eric Anholt0ec768e2010-06-04 17:09:11 -07003809 init_cache_buckets(bufmgr_gem);
Eric Anholtd70d6052009-10-06 12:40:42 -07003810
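	/* Track buffers with cached mmaps so the number of open mappings
	 * can be capped via the vma_max limit. */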
Chris Wilsone4b60f22011-12-05 21:29:05 +00003811 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3812 bufmgr_gem->vma_max = -1; /* unlimited by default */
3813
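	/* Register this manager globally so a later init on the same fd
	 * returns the same, reference-counted bufmgr. */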
Lionel Landwerlin743af592014-09-12 13:48:36 +01003814 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3815
3816exit:
3817 pthread_mutex_unlock(&bufmgr_list_mutex);
3818
3819 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
Eric Anholt6a9eb082008-06-03 09:27:37 -07003820}