/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker). See the sketch after this struct.
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
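
/*
 * Illustrative sketch only (not part of the driver): a backend implements
 * this interface by wiring its page allocation and teardown into
 * get_pages()/put_pages(). The names my_get_pages, my_put_pages and my_ops
 * below are hypothetical:
 *
 *	static struct sg_table *my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		// Allocate and populate an sg_table describing the backing
 *		// pages; the core keeps it pinned until put_pages().
 *		struct sg_table *st = kmalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return ERR_PTR(-ENOMEM);
 *		... populate st from the backing storage ...
 *		return st;
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		// Release whatever my_get_pages() acquired.
 *		sg_free_table(pages);
 *		kfree(pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */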

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;
	struct rb_root vma_tree;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
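
	/*
	 * A fenceable stride is always a multiple of FENCE_MINIMUM_STRIDE
	 * (128, see i915_tiling_ok()), so the low bits of tiling_and_stride
	 * are free to carry the tiling mode. Illustrative sketch (assumes
	 * stride is already a multiple of 128):
	 *
	 *	obj->tiling_and_stride = tiling | stride;
	 *	tiling = obj->tiling_and_stride & TILING_MASK;
	 *	stride = obj->tiling_and_stride & STRIDE_MASK;
	 *
	 * See i915_gem_object_get_tiling()/i915_gem_object_get_stride()
	 * below.
	 */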

	/** Count of VMAs actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	struct i915_gem_userptr {
		uintptr_t ptr;
		unsigned read_only :1;

		struct i915_mm_struct *mm;
		struct i915_mmu_object *mmu_object;
		struct work_struct *work;
	} userptr;

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
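
/*
 * Typical usage, as a sketch (the ioctl context and args->handle are
 * hypothetical): a successful lookup returns a reference that the caller
 * must drop with i915_gem_object_put() when done:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj ...
 *
 *	i915_gem_object_put(obj);
 */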

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->base.refcount.refcount) == 0;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
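
/*
 * Sketch of the intended pattern (illustrative, assumes struct_mutex is
 * held, per the lockdep asserts above): to keep an object alive across
 * in-flight GPU work without a kref per request, take a single extra
 * reference and flag it:
 *
 *	if (!i915_gem_object_has_active_reference(obj)) {
 *		i915_gem_object_get(obj);
 *		i915_gem_object_set_active_reference(obj);
 *	}
 *
 * The flagged reference is then dropped once the object idles, e.g. via
 * __i915_gem_object_release_unless_active().
 */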

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
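
/*
 * Illustrative use (a hypothetical debug snippet, not driver code): report
 * which engine, if any, still owes a write to the object:
 *
 *	struct intel_engine_cs *engine;
 *
 *	engine = i915_gem_object_last_write_engine(obj);
 *	if (engine)
 *		pr_debug("last write pending on %s\n", engine->name);
 */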

#endif