/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct list_head ctx_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
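
/*
 * Illustrative sketch (simplified from the actual close path, so treat it
 * as an assumption rather than the exact code): the two list heads let an
 * entry be unlinked from both its object and its context, so closing
 * either side can prune the handle->vma cache:
 *
 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *		list_del(&lut->ctx_link);
 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 *		kfree(lut);
 *	}
 */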

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)
#define I915_GEM_OBJECT_IS_PROXY	BIT(2)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to first use of the associated
	 * set of pages, before binding them into the GTT, and put_pages()
	 * is called after we no longer need them. As we expect there to be
	 * an associated cost with migrating pages between the backing
	 * storage and making them available for the GPU (e.g. clflush),
	 * we may hold onto the pages after they are no longer referenced
	 * by the GPU in case they may be used again shortly (for example
	 * migrating the pages to a different memory domain within the
	 * GTT). put_pages() will therefore most likely be called when the
	 * object itself is being released or under memory pressure (where
	 * we attempt to reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
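
/*
 * Minimal sketch of a backing-storage implementation (illustrative only:
 * my_alloc_sg() and my_free_sg() are hypothetical helpers, and the use of
 * __i915_gem_object_set_pages()/i915_sg_page_sizes() assumes the helpers
 * of this vintage of i915_drv.h):
 *
 *	static int my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *st = my_alloc_sg(obj);
 *
 *		if (IS_ERR(st))
 *			return PTR_ERR(st);
 *
 *		__i915_gem_object_set_pages(obj, st,
 *					    i915_sg_page_sizes(st->sgl));
 *		return 0;
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		my_free_sg(obj, pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */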

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT vma are
	 * placed at the head and all ppGTT vma are placed at the tail.
	 * The different types of GGTT vma are unordered between
	 * themselves; use the @vma_tree (which has a defined order
	 * between all VMA) to find an exact match.
	 */
	struct list_head vma_list;
	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMA created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration.
	 */
	struct rb_root vma_tree;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Count of outstanding user faults: non-zero whilst the object is
	 * in the GGTT mmap, in which case it also sits on @userfault_link
	 * for mmap revocation.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ	BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE	BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	/** Count of how many global VMA are currently pinned for use by HW */
	unsigned int pin_global;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		/* TODO: whack some of this into the error state */
		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need
			 * to prevent any trampling of state; hence a copy of
			 * this struct also lives in each vma, and the gtt
			 * value here should only be read/written through
			 * that vma.
			 */
			unsigned int gtt;
		} page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;

		void *gvt_info;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};
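
/*
 * Example (sketch): because @vma_tree keeps VMA in a defined order, exact
 * lookups go through i915_vma_instance() rather than walking @vma_list;
 * here "i915" is assumed to be the drm_i915_private of this vintage:
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */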

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}
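
/*
 * Example usage (sketch): peek at object state without taking a reference;
 * the pointer is only stable inside the RCU read lock, e.g.:
 *
 *	rcu_read_lock();
 *	obj = i915_gem_object_lookup_rcu(file, handle);
 *	if (obj)
 *		size = obj->base.size;
 *	rcu_read_unlock();
 */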

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
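
/*
 * Typical ioctl pattern (sketch): a successful lookup takes a reference
 * that must be balanced with i915_gem_object_put():
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */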

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
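
/*
 * Example (sketch): these helpers wrap the object's reservation lock;
 * hold it around fence updates on @resv, e.g. (rq being an i915_request):
 *
 *	i915_gem_object_lock(obj);
 *	reservation_object_add_excl_fence(obj->resv, &rq->fence);
 *	i915_gem_object_unlock(obj);
 */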

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
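
/*
 * Sketch of the active-reference pattern: rather than dropping the last
 * reference while the GPU may still be using the object, the owner marks
 * it and lets retirement perform the final put (assumed to mirror
 * __i915_gem_object_release_unless_active() below):
 *
 *	if (i915_gem_object_is_active(obj) &&
 *	    !i915_gem_object_has_active_reference(obj))
 *		i915_gem_object_set_active_reference(obj);
 *	else
 *		i915_gem_object_put(obj);
 */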

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
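
/*
 * Worked example: an X-tiled object with a 512 byte stride has a tile
 * height of 8 rows, so one complete tile row spans 512 * 8 = 4096 bytes;
 * with Y tiling (32 rows) the same stride spans 512 * 32 = 16384 bytes.
 */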

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
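
/*
 * Example usage (sketch): the result is advisory only (the fence may be
 * signaled as soon as the unlocked peek returns), so it is best suited
 * to debug output:
 *
 *	engine = i915_gem_object_last_write_engine(obj);
 *	seq_printf(m, "writer: %s\n", engine ? engine->name : "none");
 */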

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif