/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_selftest.h"

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker). See the backend sketch after this
	 * struct.
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
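
/*
 * A minimal sketch of a backend implementing the ops above; the "example"
 * names are hypothetical and only illustrate the expected contract:
 * get_pages() returns a populated sg_table (or an ERR_PTR on failure) and
 * put_pages() releases it again.
 *
 *	static struct sg_table *
 *	example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *st;
 *
 *		st = kmalloc(sizeof(*st), GFP_KERNEL);
 *		if (!st)
 *			return ERR_PTR(-ENOMEM);
 *
 *		if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT,
 *				   GFP_KERNEL)) {
 *			kfree(st);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *
 *		... fill the scatterlist from the backing store ...
 *
 *		return st;
 *	}
 *
 *	static void
 *	example_put_pages(struct drm_i915_gem_object *obj,
 *			  struct sg_table *pages)
 *	{
 *		sg_free_table(pages);
 *		kfree(pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */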

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT VMAs are
	 * placed at the head and all ppGTT VMAs at the tail. The different
	 * types of GGTT VMAs are unordered between themselves; use the
	 * @vma_tree (which has a defined order between all VMAs) to find an
	 * exact match.
	 */
	struct list_head vma_list;
	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMAs created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration. See the
	 * lookup sketch after this struct.
	 */
	struct rb_root vma_tree;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;
	unsigned int cache_coherent:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
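	/*
	 * Worked example of the packing above: the stride of a tiled
	 * object is always a multiple of FENCE_MINIMUM_STRIDE (128), so
	 * the low 7 bits are free to hold the tiling mode. A stride of
	 * 4096 bytes with I915_TILING_X (1) is stored as:
	 *
	 *	tiling_and_stride = 4096 | 1 = 0x1001
	 *	tiling = 0x1001 & TILING_MASK = 1	(I915_TILING_X)
	 *	stride = 0x1001 & STRIDE_MASK = 4096
	 */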

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring, where the write request is older than the
	 * read request. This allows the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};
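
/*
 * A sketch of resolving an object to its GGTT vma via the @vma_tree,
 * assuming a struct i915_ggtt *ggtt is in scope; i915_vma_instance()
 * performs the binary search mentioned above:
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */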

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock; note
 * carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
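
/*
 * Typical ioctl usage (a sketch; "args" stands for the ioctl payload):
 * resolve the userspace handle to a full reference, then drop it with
 * i915_gem_object_put() once done.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */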

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
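
/*
 * Sketch of guarding an update with the object's reservation lock:
 *
 *	i915_gem_object_lock(obj);
 *	... update state protected by obj->resv, e.g. attach fences ...
 *	i915_gem_object_unlock(obj);
 */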

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
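
/*
 * A sketch of the active-reference pattern (roughly what
 * __i915_gem_object_release_unless_active() does, under struct_mutex):
 * instead of dropping the last reference while the GPU may still be
 * using the object, convert it into an active reference that is
 * released when the object idles.
 *
 *	lockdep_assert_held(&obj->base.dev->struct_mutex);
 *	if (!i915_gem_object_is_active(obj))
 *		i915_gem_object_put(obj);
 *	else
 *		i915_gem_object_set_active_reference(obj);
 */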
331
Chris Wilsondd689282017-03-01 15:41:28 +0000332static inline bool
333i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
334{
335 return READ_ONCE(obj->framebuffer_references);
336}
337
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +0200338static inline unsigned int
339i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
340{
341 return obj->tiling_and_stride & TILING_MASK;
342}
343
344static inline bool
345i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
346{
347 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
348}
349
350static inline unsigned int
351i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
352{
353 return obj->tiling_and_stride & STRIDE_MASK;
354}
355
Chris Wilson6649a0b2017-01-09 16:16:08 +0000356static inline unsigned int
357i915_gem_tile_height(unsigned int tiling)
358{
359 GEM_BUG_ON(!tiling);
360 return tiling == I915_TILING_Y ? 32 : 8;
361}
362
363static inline unsigned int
364i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
365{
366 return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
367}
368
369static inline unsigned int
370i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
371{
372 return (i915_gem_object_get_stride(obj) *
373 i915_gem_object_get_tile_height(obj));
374}
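
/*
 * Worked example: a Y-tiled object (tile height 32 rows) with a 4096
 * byte stride has a tile row size of 4096 * 32 = 131072 bytes (128KiB).
 */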

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif